Docker image pushed to the registry and built with PyInstaller
.gitignore (vendored, 2 changes)
@@ -12,5 +12,5 @@ doc_carri.txt
ase.egg-info/
site/
site.zip
*/src/
vm1/src/
.vscode/extensions.json
@@ -17,18 +17,25 @@ IMAGE_NAME=${2:-"orchestrator-app"}
TAG=${3:-"latest"}
DOCKERFILE_TYPE=${4:-"standard"}

# Determina quale Dockerfile usare
# Determina quale Dockerfile usare e quale tag
if [[ "$DOCKERFILE_TYPE" == "distroless" ]]; then
    DOCKERFILE="Dockerfile.distroless"
    BUILD_TYPE="Distroless (Multi-stage)"
    # Se il tag è "latest", usa "distroless", altrimenti aggiungi suffisso "-distroless"
    if [[ "$TAG" == "latest" ]]; then
        ACTUAL_TAG="distroless"
    else
        ACTUAL_TAG="${TAG}-distroless"
    fi
else
    DOCKERFILE="Dockerfile"
    BUILD_TYPE="Standard (python:3.12-slim)"
    ACTUAL_TAG="$TAG"
fi

# Nome completo dell'immagine
FULL_IMAGE_NAME="${REGISTRY_URL}/${IMAGE_NAME}:${TAG}"
LOCAL_IMAGE_NAME="${IMAGE_NAME}:${TAG}"
FULL_IMAGE_NAME="${REGISTRY_URL}/${IMAGE_NAME}:${ACTUAL_TAG}"
LOCAL_IMAGE_NAME="${IMAGE_NAME}:${ACTUAL_TAG}"

# Colors
RED='\033[0;31m'
@@ -71,11 +78,17 @@ show_usage() {
    echo " standard - Usa Dockerfile (python:3.12-slim, ~333MB)"
    echo " distroless - Usa Dockerfile.distroless (gcr.io/distroless, ~180MB, più sicuro)"
    echo ""
    echo "Tag automatici:"
    echo " standard + latest → orchestrator-app:latest"
    echo " distroless + latest → orchestrator-app:distroless"
    echo " standard + v1.0 → orchestrator-app:v1.0"
    echo " distroless + v1.0 → orchestrator-app:v1.0-distroless"
    echo ""
    echo "Esempi:"
    echo " $0 # Build standard locale"
    echo " $0 registry.example.com:5000 # Registry custom, standard"
    echo " $0 registry.example.com:5000 my-app latest distroless # Build distroless"
    echo " $0 192.168.1.204:5000 orchestrator-app v1.0.0 distroless # Produzione distroless"
    echo " $0 # → orchestrator-app:latest (standard)"
    echo " $0 192.168.1.204:5000 orchestrator-app latest distroless # → orchestrator-app:distroless"
    echo " $0 192.168.1.204:5000 orchestrator-app v1.0.0 standard # → orchestrator-app:v1.0.0"
    echo " $0 192.168.1.204:5000 orchestrator-app v1.0.0 distroless # → orchestrator-app:v1.0.0-distroless"
    echo ""
}

@@ -118,7 +131,8 @@ print_info " Dockerfile: ${CYAN}$DOCKERFILE${NC}"
print_info " Build type: ${CYAN}$BUILD_TYPE${NC}"
print_info " Registry: $REGISTRY_URL"
print_info " Nome immagine: $IMAGE_NAME"
print_info " Tag: $TAG"
print_info " Tag richiesto: $TAG"
print_info " Tag effettivo: ${CYAN}$ACTUAL_TAG${NC}"
print_info " Nome completo: ${CYAN}$FULL_IMAGE_NAME${NC}"
print_info ""

@@ -135,7 +149,7 @@ fi
print_header "STEP 1: Build dell'immagine Docker"

print_info "Inizio build dell'immagine..."
print_info "Comando: docker build -f $DOCKERFILE -t $LOCAL_IMAGE_NAME $PROJECT_DIR"
print_info "Comando: docker build --no-cache -f $DOCKERFILE -t $LOCAL_IMAGE_NAME $PROJECT_DIR"
echo ""

# Build dell'immagine con Dockerfile specificato
@@ -1,6 +1,186 @@
#!/bin/bash
# sync_modifche_vm.sh
# Sincronizza i file necessari per lo stack Docker sui server remoti
# NOTA: env/ è ora OBBLIGATORIO perché montato come volume esterno (non più nell'immagine)

rsync -avz --exclude='*.pyc' --exclude '__pycache__/' /home/alex/devel/ASE/src /home/alex/devel/proxmox-ha-setup/vm1/
#rsync -avz --exclude='*.pyc' --exclude '__pycache__/' /home/alex/devel/ASE/src /home/alex/devel/proxmox-ha-setup/vm2/
rsync -avz -e "ssh -p 2222" /home/alex/devel/proxmox-ha-setup/vm1/ root@192.168.1.201:/opt/ase/
rsync -avz -e "ssh -p 2222" /home/alex/devel/proxmox-ha-setup/vm2/ root@192.168.1.202:/opt/ase/
set -e

# Configurazione
VM1_IP="192.168.1.201"
VM2_IP="192.168.1.202"
SSH_PORT="2222"
SSH_USER="root"
DEST_DIR="/opt/ase"
SOURCE_VM1="/home/alex/devel/proxmox-ha-setup/vm1/"
SOURCE_VM2="/home/alex/devel/proxmox-ha-setup/vm2/"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

print_info() { echo -e "${CYAN}ℹ $1${NC}"; }
print_success() { echo -e "${GREEN}✓ $1${NC}"; }
print_warning() { echo -e "${YELLOW}⚠ $1${NC}"; }

echo "================================================"
echo "Sincronizzazione file Docker stack"
echo "================================================"
echo ""

print_info "Configurazione:"
print_info " VM1: ${VM1_IP}:${SSH_PORT} → ${DEST_DIR}"
print_info " VM2: ${VM2_IP}:${SSH_PORT} → ${DEST_DIR}"
echo ""

# File da ESCLUDERE (non necessari sui server):
# - src/ (codice sorgente, ormai nell'immagine come .pyc)
# - Dockerfile* (non serve se usi immagini dal registry)
# - pyproject.toml (non serve se usi immagini dal registry)
# - *.md (documentazione)
# - __pycache__/ e *.pyc (generati automaticamente)

print_warning "IMPORTANTE: env/ sarà sincronizzato (necessario come volume esterno)"
echo ""

read -p "Procedere con la sincronizzazione? (Y/n) " -n 1 -r
echo
if [[ $REPLY =~ ^[Nn]$ ]]; then
    print_warning "Sincronizzazione annullata"
    exit 0
fi

echo ""
print_info "Sincronizzazione VM1 (${VM1_IP})..."
rsync -avz -e "ssh -p ${SSH_PORT}" \
    --exclude 'src' \
    --exclude 'Dockerfile' \
    --exclude 'Dockerfile.distroless' \
    --exclude 'pyproject.toml' \
    --exclude '*.md' \
    --exclude '*.backup' \
    --exclude '*.example' \
    --exclude '__pycache__' \
    --exclude '*.pyc' \
    "${SOURCE_VM1}" "${SSH_USER}@${VM1_IP}:${DEST_DIR}/"

print_success "VM1 sincronizzata"
echo ""

print_info "Sincronizzazione VM2 (${VM2_IP})..."
rsync -avz -e "ssh -p ${SSH_PORT}" \
    --exclude 'src' \
    --exclude 'Dockerfile' \
    --exclude 'Dockerfile.distroless' \
    --exclude 'pyproject.toml' \
    --exclude '*.md' \
    --exclude '*.backup' \
    --exclude '*.example' \
    --exclude '__pycache__' \
    --exclude '*.pyc' \
    "${SOURCE_VM2}" "${SSH_USER}@${VM2_IP}:${DEST_DIR}/"

print_success "VM2 sincronizzata"
echo ""

# Configurazione certificati registry privato
REGISTRY_CERT_SOURCE="/var/snap/docker/common/etc/docker/certs.d/192.168.1.204:5000/ca.crt"
REGISTRY_DOMAIN="192.168.1.204:5000"

if [ -f "$REGISTRY_CERT_SOURCE" ]; then
    print_info "Installazione certificati registry privato..."

    # Verifica se Docker è installato tramite Snap o APT
    print_info "Installazione certificato registry su VM1..."
    ssh -p "${SSH_PORT}" "${SSH_USER}@${VM1_IP}" << 'EOF'
REGISTRY_DOMAIN="192.168.1.204:5000"

# Verifica se Docker è installato via Snap
if [ -d "/var/snap/docker" ]; then
    echo " → Docker Snap detectato"
    CERT_DIR="/var/snap/docker/common/etc/docker/certs.d/${REGISTRY_DOMAIN}"
else
    echo " → Docker standard detectato"
    CERT_DIR="/etc/docker/certs.d/${REGISTRY_DOMAIN}"
fi

mkdir -p "${CERT_DIR}"
echo " → Directory certificati: ${CERT_DIR}"
EOF

    # Copia il certificato
    if ssh -p "${SSH_PORT}" "${SSH_USER}@${VM1_IP}" "[ -d /var/snap/docker ]"; then
        REMOTE_CERT_DIR="/var/snap/docker/common/etc/docker/certs.d/${REGISTRY_DOMAIN}"
    else
        REMOTE_CERT_DIR="/etc/docker/certs.d/${REGISTRY_DOMAIN}"
    fi

    ssh -p "${SSH_PORT}" "${SSH_USER}@${VM1_IP}" "mkdir -p ${REMOTE_CERT_DIR}"
    scp -P "${SSH_PORT}" "${REGISTRY_CERT_SOURCE}" "${SSH_USER}@${VM1_IP}:${REMOTE_CERT_DIR}/ca.crt"
    print_success "Certificato registry installato su VM1"

    print_info "Installazione certificato registry su VM2..."
    ssh -p "${SSH_PORT}" "${SSH_USER}@${VM2_IP}" << 'EOF'
REGISTRY_DOMAIN="192.168.1.204:5000"

# Verifica se Docker è installato via Snap
if [ -d "/var/snap/docker" ]; then
    echo " → Docker Snap detectato"
    CERT_DIR="/var/snap/docker/common/etc/docker/certs.d/${REGISTRY_DOMAIN}"
else
    echo " → Docker standard detectato"
    CERT_DIR="/etc/docker/certs.d/${REGISTRY_DOMAIN}"
fi

mkdir -p "${CERT_DIR}"
echo " → Directory certificati: ${CERT_DIR}"
EOF

    # Copia il certificato
    if ssh -p "${SSH_PORT}" "${SSH_USER}@${VM2_IP}" "[ -d /var/snap/docker ]"; then
        REMOTE_CERT_DIR="/var/snap/docker/common/etc/docker/certs.d/${REGISTRY_DOMAIN}"
    else
        REMOTE_CERT_DIR="/etc/docker/certs.d/${REGISTRY_DOMAIN}"
    fi

    ssh -p "${SSH_PORT}" "${SSH_USER}@${VM2_IP}" "mkdir -p ${REMOTE_CERT_DIR}"
    scp -P "${SSH_PORT}" "${REGISTRY_CERT_SOURCE}" "${SSH_USER}@${VM2_IP}:${REMOTE_CERT_DIR}/ca.crt"
    print_success "Certificato registry installato su VM2"
    echo ""
else
    print_warning "Certificato registry non trovato: ${REGISTRY_CERT_SOURCE}"
    print_warning "Salta installazione certificati - il pull dal registry potrebbe fallire"
    echo ""
fi

print_success "Sincronizzazione completata!"
echo ""
print_info "File sincronizzati:"
print_info " ✓ docker-compose.yml"
print_info " ✓ env/ (configurazioni - OBBLIGATORIO)"
print_info " ✓ aseftp/ (directory FTP/SFTP)"
print_info " ✓ certs/ (certificati applicazione)"
print_info " ✓ matlab_func/ (funzioni MATLAB)"
print_info " ✓ haproxy.cfg, keepalived*.conf, alloy-config.alloy"
print_info " ✓ ssh_host_key* (chiavi SSH)"
echo ""
print_info "Certificati registry:"
if [ -f "$REGISTRY_CERT_SOURCE" ]; then
    print_info " ✓ Certificato registry installato su VM1 e VM2"
    print_info " → ${REGISTRY_DOMAIN}"
else
    print_warning " ✗ Certificato registry NON trovato"
fi
echo ""
print_info "File ESCLUSI (non necessari):"
print_info " ✗ src/ (codice sorgente, già nell'immagine Docker)"
print_info " ✗ Dockerfile* (non serve se usi registry)"
print_info " ✗ pyproject.toml (non serve se usi registry)"
print_info " ✗ *.md (documentazione)"
echo ""
print_info "Prossimi passi sui server:"
print_info " 1. Verifica certificato registry: docker pull ${REGISTRY_DOMAIN}/orchestrator-app:latest"
print_info " 2. Avvia lo stack: cd ${DEST_DIR} && docker-compose up -d"
print_info " 3. Verifica log: docker-compose logs -f"
echo ""
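A quick sanity check after the sync, not part of the script itself (a sketch, assuming the registry at 192.168.1.204:5000 is reachable from the VMs and the ca.crt above was installed):

ssh -p 2222 root@192.168.1.201 "docker pull 192.168.1.204:5000/orchestrator-app:latest"
ssh -p 2222 root@192.168.1.202 "docker pull 192.168.1.204:5000/orchestrator-app:latest"

If the pull fails with an x509/certificate error, the CA was not picked up; for the Snap installation the daemon may need a restart (snap restart docker) before retrying.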
vm1/Dockerfile (120 changes)
@@ -1,34 +1,118 @@
FROM python:3.12-slim
# Multi-stage build con PyInstaller per protezione totale del codice
# Stage 1: Build con PyInstaller
FROM python:3.12-slim AS builder

# Installa uv
# Installa uv e PyInstaller
COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv

# Installa binutils richiesto da PyInstaller
RUN apt-get update && apt-get install -y binutils && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copia pyproject.toml, codice sorgente e file statici
# NOTA: env/ NON viene copiato - sarà montato come volume esterno
# Copia i file necessari per la build
COPY pyproject.toml ./
COPY src/ ./src/
COPY certs/ ./certs/
COPY matlab_func/ ./matlab_func/

# Installa le dipendenze
RUN uv pip install --system -e .
# Installa le dipendenze e PyInstaller
RUN uv pip install --system -e . && \
    uv pip install --system pyinstaller

# Compila tutti i file Python in bytecode
# Usa -OO per rimuovere docstring e assert (ottimizzazione massima)
RUN python -OO -m compileall /app/src || true
# Compila tutti gli entry point con PyInstaller
# Ogni entry point diventa un binario standalone
# Include metadata per pacchetti che lo richiedono
RUN pyinstaller \
    --onefile \
    --name load_orchestrator \
    --collect-all src \
    --collect-all certs \
    --collect-all matlab_func \
    --collect-all aioftp \
    --collect-all aiomysql \
    --collect-all aiofiles \
    --collect-all aiosmtplib \
    --collect-all cryptography \
    --hidden-import=src \
    --hidden-import=src.utils \
    --hidden-import=src.refactory_scripts \
    src/load_orchestrator.py && \
    pyinstaller \
    --onefile \
    --name elab_orchestrator \
    --collect-all src \
    --collect-all certs \
    --collect-all matlab_func \
    --collect-all aioftp \
    --collect-all aiomysql \
    --collect-all aiofiles \
    --collect-all aiosmtplib \
    --collect-all cryptography \
    --hidden-import=src \
    --hidden-import=src.utils \
    --hidden-import=src.refactory_scripts \
    src/elab_orchestrator.py && \
    pyinstaller \
    --onefile \
    --name send_orchestrator \
    --collect-all src \
    --collect-all certs \
    --collect-all matlab_func \
    --collect-all aioftp \
    --collect-all aiomysql \
    --collect-all aiofiles \
    --collect-all aiosmtplib \
    --collect-all cryptography \
    --hidden-import=src \
    --hidden-import=src.utils \
    --hidden-import=src.refactory_scripts \
    src/send_orchestrator.py && \
    pyinstaller \
    --onefile \
    --name ftp_csv_receiver \
    --collect-all src \
    --collect-all certs \
    --collect-all matlab_func \
    --collect-all aioftp \
    --collect-all aiomysql \
    --collect-all aiofiles \
    --collect-all aiosmtplib \
    --collect-all cryptography \
    --hidden-import=src \
    --hidden-import=src.utils \
    --hidden-import=src.refactory_scripts \
    src/ftp_csv_receiver.py

# Rimuovi tutti i file sorgente .py, lasciando solo i .pyc compilati in __pycache__
RUN find /app/src -type f -name "*.py" -delete
# Crea directory per runtime (saranno montate come volumi)
RUN mkdir -p /app/dist/logs /app/dist/aseftp/csvfs /app/dist/matlab_runtime /app/dist/env

# Crea directory per i log, FTP, MATLAB e ENV (sarà montata)
RUN mkdir -p /app/logs /app/aseftp/csvfs /app/certs /app/matlab_runtime /app/matlab_func /app/env
# ====================================================================
# STAGE 2: IMMAGINE FINALE (RUNTIME MINIMA)
# Contiene solo il binario compilato, nessun codice sorgente.
# ====================================================================
FROM python:3.12-slim

WORKDIR /app

# Copia tutti i binari compilati
COPY --from=builder /app/dist/load_orchestrator ./load_orchestrator
COPY --from=builder /app/dist/elab_orchestrator ./elab_orchestrator
COPY --from=builder /app/dist/send_orchestrator ./send_orchestrator
COPY --from=builder /app/dist/ftp_csv_receiver ./ftp_csv_receiver

# Copia i dati statici necessari
COPY certs/ ./certs/
COPY matlab_func/ ./matlab_func/

# Copia le directory vuote per runtime (saranno montate come volumi)
RUN mkdir -p ./logs ./aseftp/csvfs ./matlab_runtime ./env

# Rendi tutti gli eseguibili executable
RUN chmod +x ./load_orchestrator ./elab_orchestrator ./send_orchestrator ./ftp_csv_receiver

# Variabili ambiente
ENV PYTHONUNBUFFERED=1
ENV PYTHONPATH=/app
# Disabilita la creazione di nuovi file .pyc a runtime (non necessari dato che abbiamo già i .pyc)
ENV PYTHONDONTWRITEBYTECODE=1

# Il comando verrà specificato nel docker-compose.yml per ogni servizio
CMD ["python", "-m", "src.elab_orchestrator"]
# Default: elab_orchestrator (sarà sovrascritto da docker-compose)
CMD ["./elab_orchestrator"]
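To confirm that the runtime stage really ships only the PyInstaller binaries and static data, a minimal check (a sketch; the image reference depends on how the build script tagged it):

docker run --rm --entrypoint sh 192.168.1.204:5000/orchestrator-app:latest -c 'ls -l /app; find /app -name "*.py"'

An empty find result means no Python sources are present in the final image; each service is started directly as ./load_orchestrator, ./elab_orchestrator, ./send_orchestrator or ./ftp_csv_receiver.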
@@ -18,12 +18,14 @@ COPY matlab_func/ ./matlab_func/
RUN uv pip install --python=/usr/local/bin/python3 --target=/app/deps .

# Compila tutti i file Python in bytecode
RUN python -OO -m compileall /app/src || true
# Usa -m compileall per generare .pyc standard (non .opt-2.pyc)
RUN python -m compileall /app/src

# Rimuovi tutti i file sorgente .py, lasciando solo i .pyc compilati
RUN find /app/src -type f -name "*.py" -delete
# Manteniamo i file .py per compatibilità (Python userà comunque i .pyc precompilati)
# Nota: Il codice sorgente è visibile ma Python esegue sempre il bytecode .pyc
# Per protezione completa del codice, considera l'uso di PyArmor o simili

# Rimuovi anche i .py dalle dipendenze
# Rimuovi i .py dalle dipendenze per ridurre la dimensione
RUN find /app/deps -type f -name "*.py" -delete || true

# Crea directory vuote per runtime (saranno montate come volumi)
@@ -1,36 +1,11 @@
services:
  mysql:
    image: mysql:8.0
    container_name: mysql
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD:-Ase@2025}
      MYSQL_DATABASE: ${MYSQL_DATABASE:-ase_lar}
      MYSQL_USER: ${MYSQL_USER:-ase_lar}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD:-ase_lar}
      TZ: Europe/Rome
    volumes:
      - mysql_data:/var/lib/mysql
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    ports:
      - "3306:3306"
    healthcheck:
      test: [ "CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-p${MYSQL_ROOT_PASSWORD:-Ase@2025}" ]
      interval: 10s
      timeout: 5s
      retries: 3
    labels:
      logging: "alloy"
      logging_jobname: "mysql"
  orchestrator-1-load:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: orchestrator-1-load
    restart: unless-stopped
    command: [ "python", "-m", "src.load_orchestrator" ]
    command: ["./load_orchestrator"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 1
      TZ: Europe/Rome
@@ -44,11 +19,12 @@ services:
    labels:
      logging: "alloy"
  orchestrator-2-elab:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: orchestrator-2-elab
    restart: unless-stopped
    command: [ "python", "-m", "src.elab_orchestrator" ]
    command: ["./elab_orchestrator"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 2
      TZ: Europe/Rome
@@ -62,11 +38,12 @@ services:
    labels:
      logging: "alloy"
  orchestrator-3-send:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: orchestrator-3-send
    restart: unless-stopped
    command: [ "python", "-m", "src.send_orchestrator" ]
    command: ["./send_orchestrator"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 3
      TZ: Europe/Rome
@@ -80,11 +57,12 @@ services:
    labels:
      logging: "alloy"
  ftp-server-1:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: ftp-server-1
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    command: ["./ftp_csv_receiver"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 1
      FTP_MODE: ftp
@@ -108,11 +86,12 @@ services:
    labels:
      logging: "alloy"
  sftp-server-1:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: sftp-server-1
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    command: ["./ftp_csv_receiver"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 11
      FTP_MODE: sftp
@@ -185,5 +164,4 @@ services:
networks:
  app-network:
volumes:
  mysql_data:
  app-logs:
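With the services now pointing at the registry image and the compiled binaries, a typical rollout on a VM would look like this (a sketch, assuming the stack was synced to /opt/ase as described above):

cd /opt/ase
docker-compose pull
docker-compose up -d
docker-compose logs -f orchestrator-2-elab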
vm1/docker-compose.yml.backup (new file, 189 lines)
@@ -0,0 +1,189 @@
services:
  mysql:
    image: mysql:8.0
    container_name: mysql
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD:-Ase@2025}
      MYSQL_DATABASE: ${MYSQL_DATABASE:-ase_lar}
      MYSQL_USER: ${MYSQL_USER:-ase_lar}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD:-ase_lar}
      TZ: Europe/Rome
    volumes:
      - mysql_data:/var/lib/mysql
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    ports:
      - "3306:3306"
    healthcheck:
      test: [ "CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-p${MYSQL_ROOT_PASSWORD:-Ase@2025}" ]
      interval: 10s
      timeout: 5s
      retries: 3
    labels:
      logging: "alloy"
      logging_jobname: "mysql"
  orchestrator-1-load:
    build: .
    container_name: orchestrator-1-load
    restart: unless-stopped
    command: [ "python", "-m", "src.load_orchestrator" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 1
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    labels:
      logging: "alloy"
  orchestrator-2-elab:
    build: .
    container_name: orchestrator-2-elab
    restart: unless-stopped
    command: [ "python", "-m", "src.elab_orchestrator" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 2
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    labels:
      logging: "alloy"
  orchestrator-3-send:
    build: .
    container_name: orchestrator-3-send
    restart: unless-stopped
    command: [ "python", "-m", "src.send_orchestrator" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 3
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    labels:
      logging: "alloy"
  ftp-server-1:
    build: .
    container_name: ftp-server-1
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 1
      FTP_MODE: ftp
      TZ: Europe/Rome
      FTP_PASSIVE_PORT: "40000"
      FTP_EXTERNAL_IP: ${VIP:-192.168.1.210}
      # File Processing Behavior
      # DELETE_AFTER_PROCESSING: "true" # Cancella file dopo elaborazione corretta (default: false = mantiene i file)
    volumes:
      - app-logs:/app/logs
      - ./aseftp:/app/aseftp
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    ports:
      - "40000-40499:40000-40499"
    expose:
      - "21"
    labels:
      logging: "alloy"
  sftp-server-1:
    build: .
    container_name: sftp-server-1
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 11
      FTP_MODE: sftp
      FTP_PORT: "22"
      TZ: Europe/Rome
      # File Processing Behavior
      # DELETE_AFTER_PROCESSING: "true" # Cancella file dopo elaborazione corretta (default: false = mantiene i file)
    volumes:
      - app-logs:/app/logs
      - ./aseftp:/app/aseftp
      - ./env:/app/env:ro
      - ./ssh_host_key:/app/ssh_host_key:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    ports:
      - "22:22"
    labels:
      logging: "alloy"

  haproxy:
    image: haproxy:2.8-alpine
    container_name: haproxy
    restart: unless-stopped
    volumes:
      - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
    networks:
      - app-network
    ports:
      - "21:21"
      - "8404:8404"
    labels:
      logging: "alloy"
  keepalived:
    image: alpine:latest
    container_name: keepalived
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
      - NET_BROADCAST
      - NET_RAW
    network_mode: host
    volumes:
      - ./keepalived-master.conf:/etc/keepalived/keepalived.conf:ro
    command: sh -c "apk add --no-cache keepalived && keepalived -n -D -l -f /etc/keepalived/keepalived.conf"
  alloy:
    image: grafana/alloy:latest
    container_name: alloy
    restart: unless-stopped
    environment:
      TZ: Europe/Rome
    volumes:
      - ./alloy-config.alloy:/etc/alloy/config.alloy:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/host/root:ro
    command:
      - run
      - --server.http.listen-addr=0.0.0.0:12345
      - --storage.path=/var/lib/alloy/data
      - /etc/alloy/config.alloy
    ports:
      - "12345:12345" # Alloy UI
    networks:
      - app-network
networks:
  app-network:
volumes:
  mysql_data:
  app-logs:
vm1/env/ftp.ini (vendored, 2 changes)
@@ -33,5 +33,5 @@
[csv]
Infos = IP|Subnet|Gateway

[ts_pini]:
[ts_pini]
path_match = [276_208_TS0003]:TS0003|[Neuchatel_CDP]:TS7|[TS0006_EP28]:=|[TS0007_ChesaArcoiris]:=|[TS0006_EP28_3]:=|[TS0006_EP28_4]:TS0006_EP28_4|[TS0006_EP28_5]:TS0006_EP28_5|[TS18800]:=|[Granges_19 100]:=|[Granges_19 200]:=|[Chesa_Arcoiris_2]:=|[TS0006_EP28_1]:=|[TS_PS_Petites_Croisettes]:=|[_Chesa_Arcoiris_1]:=|[TS_test]:=|[TS-VIME]:=
vm1/env/load.ini (vendored, 4 changes)
@@ -1,5 +1,5 @@
[logging]:
[logging]
logFilename = /app/logs/load_raw_data.log

[threads]:
[threads]
max_num = 5
@@ -22,7 +22,7 @@ frontend mysql_frontend

backend mysql_backend
    mode tcp
    server mysql1 mysql:3306 check
    server mysql1 192.168.1.201:3306 check

frontend ftp_control
    bind *:21
@@ -1,15 +1,17 @@
services:
  orchestrator-4-load:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: orchestrator-4-load
    restart: unless-stopped
    command: [ "python", "-m", "src.load_orchestrator" ]
    command: ["./load_orchestrator"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 4
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
@@ -17,16 +19,18 @@ services:
    labels:
      logging: "alloy"
  orchestrator-5-elab:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: orchestrator-5-elab
    restart: unless-stopped
    command: [ "python", "-m", "src.elab_orchestrator" ]
    command: ["./elab_orchestrator"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 5
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
@@ -34,16 +38,18 @@ services:
    labels:
      logging: "alloy"
  orchestrator-6-send:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: orchestrator-6-send
    restart: unless-stopped
    command: [ "python", "-m", "src.send_orchestrator" ]
    command: ["./send_orchestrator"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 6
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
@@ -51,11 +57,12 @@ services:
    labels:
      logging: "alloy"
  ftp-server-2:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: ftp-server-2
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    command: ["./ftp_csv_receiver"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 2
      FTP_MODE: ftp
@@ -67,6 +74,7 @@ services:
    volumes:
      - app-logs:/app/logs
      - ./aseftp:/app/aseftp
      - ./env:/app/env:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
@@ -78,11 +86,12 @@ services:
    labels:
      logging: "alloy"
  sftp-server-2:
    build: .
    image: 192.168.1.204:5000/orchestrator-app:latest
    container_name: sftp-server-2
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    command: ["./ftp_csv_receiver"]
    environment:
      APP_ENV_PATH: /app
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 12
      FTP_MODE: sftp
@@ -93,6 +102,7 @@ services:
    volumes:
      - app-logs:/app/logs
      - ./aseftp:/app/aseftp
      - ./env:/app/env:ro
      - ./ssh_host_key:/app/ssh_host_key:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
vm2/docker-compose.yml.backup (new file, 157 lines)
@@ -0,0 +1,157 @@
services:
  orchestrator-4-load:
    build: .
    container_name: orchestrator-4-load
    restart: unless-stopped
    command: [ "python", "-m", "src.load_orchestrator" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 4
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    labels:
      logging: "alloy"
  orchestrator-5-elab:
    build: .
    container_name: orchestrator-5-elab
    restart: unless-stopped
    command: [ "python", "-m", "src.elab_orchestrator" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 5
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    labels:
      logging: "alloy"
  orchestrator-6-send:
    build: .
    container_name: orchestrator-6-send
    restart: unless-stopped
    command: [ "python", "-m", "src.send_orchestrator" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      ORCHESTRATOR_ID: 6
      TZ: Europe/Rome
    volumes:
      - app-logs:/app/logs
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    labels:
      logging: "alloy"
  ftp-server-2:
    build: .
    container_name: ftp-server-2
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 2
      FTP_MODE: ftp
      TZ: Europe/Rome
      FTP_PASSIVE_PORT: "40000"
      FTP_EXTERNAL_IP: ${VIP:-192.168.1.210}
      # File Processing Behavior
      # DELETE_AFTER_PROCESSING: "true" # Cancella file dopo elaborazione corretta (default: false = mantiene i file)
    volumes:
      - app-logs:/app/logs
      - ./aseftp:/app/aseftp
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    ports:
      - "40000-40499:40000-40499"
    expose:
      - "21"
    labels:
      logging: "alloy"
  sftp-server-2:
    build: .
    container_name: sftp-server-2
    restart: unless-stopped
    command: [ "python", "-m", "src.ftp_csv_receiver" ]
    environment:
      DB_HOST: ${VIP:-192.168.1.210}
      FTP_INSTANCE_ID: 12
      FTP_MODE: sftp
      FTP_PORT: "22"
      TZ: Europe/Rome
      # File Processing Behavior
      # DELETE_AFTER_PROCESSING: "true" # Cancella file dopo elaborazione corretta (default: false = mantiene i file)
    volumes:
      - app-logs:/app/logs
      - ./aseftp:/app/aseftp
      - ./ssh_host_key:/app/ssh_host_key:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - app-network
    ports:
      - "22:22"
    labels:
      logging: "alloy"

  haproxy:
    image: haproxy:2.8-alpine
    container_name: haproxy
    restart: unless-stopped
    volumes:
      - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
    networks:
      - app-network
    ports:
      - "21:21"
      - "8404:8404"
    labels:
      logging: "alloy"
  keepalived:
    image: alpine:latest
    container_name: keepalived
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
      - NET_BROADCAST
      - NET_RAW
    network_mode: host
    volumes:
      - ./keepalived-backup.conf:/etc/keepalived/keepalived.conf:ro
    command: sh -c "apk add --no-cache keepalived && keepalived -n -D -l -f /etc/keepalived/keepalived.conf"
  alloy:
    image: grafana/alloy:latest
    container_name: alloy
    restart: unless-stopped
    environment:
      TZ: Europe/Rome
    volumes:
      - ./alloy-config.alloy:/etc/alloy/config.alloy:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/host/root:ro
    command:
      - run
      - --server.http.listen-addr=0.0.0.0:12345
      - --storage.path=/var/lib/alloy/data
      - /etc/alloy/config.alloy
    ports:
      - "12345:12345" # Alloy UI
    networks:
      - app-network
networks:
  app-network:
volumes:
  app-logs: