initial working

commit c850cc6e7e
2025-10-31 21:00:14 +01:00
212 changed files with 24622 additions and 0 deletions

scripts/create_cloud_init_.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash
# create_cloud_init_.sh — inject cloud-init settings into VM 200
SSH_PUBLIC_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOyva+cul3WOW3ct53a0QMRTkhtKvA2QpJI0p8bv48tH alex@alex-XPS-15-9570"
SSH_KEY_FILE="/tmp/200_id_rsa.pub"
echo "$SSH_PUBLIC_KEY" > "$SSH_KEY_FILE"
# Apply the full configuration (using the 'local' storage for snippets)
qm set 200 \
--ciuser root \
--sshkeys "$SSH_KEY_FILE" \
--ipconfig0 "ip=192.168.1.200/24,gw=192.168.1.1" \
--nameserver "8.8.8.8"
# Clean up the temporary SSH key file
rm "$SSH_KEY_FILE"

scripts/diagnose-vm-storage.sh Executable file

@@ -0,0 +1,111 @@
#!/bin/bash
# diagnose-vm-storage.sh
# Script to identify where the VM disks live
VM_ID=${1:-201}
echo "=== VM Storage Diagnostic Tool ==="
echo "VM ID: $VM_ID"
echo ""
# Check whether the VM exists
if ! qm status $VM_ID &>/dev/null; then
echo "❌ VM $VM_ID does not exist!"
exit 1
fi
echo "✓ VM $VM_ID exists"
echo ""
# Show the full configuration
echo "📋 VM Configuration:"
qm config $VM_ID
echo ""
# Extract disk info
echo "💾 Disk Information:"
DISK_LINE=$(qm config $VM_ID | grep -E "^(scsi|ide|virtio|sata)0:")
echo "$DISK_LINE"
echo ""
# Parse the volume spec: keep everything after the bus prefix (e.g. "local-lvm:vm-201-disk-0"),
# otherwise the storage-type checks below can never match
STORAGE=$(echo "$DISK_LINE" | cut -d: -f2- | cut -d, -f1 | xargs)
echo "Storage location: $STORAGE"
echo ""
# Check the storage type
if [[ $STORAGE == local-lvm:* ]]; then
echo "🔍 Storage type: LVM"
DISK_NAME=$(echo $STORAGE | cut -d: -f2)
LVM_PATH="/dev/pve/$DISK_NAME"
echo "Expected LVM path: $LVM_PATH"
if [ -e "$LVM_PATH" ]; then
echo "✓ LVM volume exists"
lvs | grep vm-$VM_ID
else
echo "❌ LVM volume NOT found"
echo "Available LVM volumes:"
lvs
fi
elif [[ $STORAGE == local:* ]]; then
echo "🔍 Storage type: Directory/File"
DISK_NAME=$(echo $STORAGE | cut -d: -f2)
FILE_PATH="/var/lib/vz/images/$VM_ID/"
echo "Expected file path: $FILE_PATH"
if [ -d "$FILE_PATH" ]; then
echo "✓ Directory exists"
ls -lh "$FILE_PATH"
# Identify the file type
for FILE in "$FILE_PATH"/*; do
if [ -f "$FILE" ]; then
echo ""
echo "File: $FILE"
file "$FILE"
du -h "$FILE"
fi
done
else
echo "❌ Directory NOT found"
fi
else
echo "🔍 Unknown storage type: $STORAGE"
echo ""
echo "Available storages:"
pvesm status
fi
echo ""
echo "=== All available storages ==="
pvesm status
echo ""
echo "=== Possible disk locations ==="
echo "Checking common paths..."
# Check LVM
echo "LVM volumes:"
lvs 2>/dev/null | grep -E "vm-?$VM_ID" || echo " None found"
# Check file-based
echo ""
echo "File-based images:"
ls -lh /var/lib/vz/images/$VM_ID/ 2>/dev/null || echo " /var/lib/vz/images/$VM_ID/ not found"
# Check other common locations
for DIR in /var/lib/vz/images /mnt/pve/*; do
if [ -d "$DIR/$VM_ID" ]; then
echo ""
echo "Found in: $DIR/$VM_ID/"
ls -lh "$DIR/$VM_ID/"
fi
done
echo ""
echo "=== Diagnostic complete ==="

scripts/fix-vm-access.sh Executable file

@@ -0,0 +1,185 @@
#!/bin/bash
# fix-vm-access.sh
# Script to fix SSH access and the root password on VMs that do not respond
set -e
VM_ID=${1:-201}
SSH_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOyva+cul3WOW3ct53a0QMRTkhtKvA2QpJI0p8bv48tH alex@alex-XPS-15-9570"
ROOT_PASSWORD="TempProxmox123!"
echo "=== VM Access Fix Tool ==="
echo "VM ID: $VM_ID"
echo ""
# Stop the VM
echo "⏸ Stopping VM $VM_ID..."
qm stop $VM_ID || true
sleep 5
# Find the VM disk
echo "🔍 Finding VM disk..."
DISK_INFO=$(qm config $VM_ID | grep -E "^scsi0:" | head -1)
echo "Disk info: $DISK_INFO"
# Extract the volume
VOLUME=$(echo "$DISK_INFO" | cut -d: -f2 | cut -d, -f1 | xargs)
echo "Volume: $VOLUME"
# Convert the volume spec into a device path
STORAGE_NAME=$(echo $VOLUME | cut -d: -f1)
DISK_NAME=$(echo $VOLUME | cut -d: -f2)
# Check storage type
STORAGE_TYPE=$(pvesm status | grep "^$STORAGE_NAME " | awk '{print $2}')
echo "Storage type: $STORAGE_TYPE"
if [[ $STORAGE_TYPE == "zfspool" ]]; then
# ZFS storage
ZFS_POOL=$(cat /etc/pve/storage.cfg | grep -A5 "^zfspool: $STORAGE_NAME" | grep "pool " | awk '{print $2}')
DEVICE_PATH="/dev/zvol/$ZFS_POOL/$DISK_NAME"
echo "ZFS pool: $ZFS_POOL"
elif [[ $STORAGE_TYPE == "lvmthin" ]] || [[ $STORAGE_TYPE == "lvm" ]]; then
# LVM storage
VG_NAME=$(cat /etc/pve/storage.cfg | grep -A5 "^lvmthin: $STORAGE_NAME\|^lvm: $STORAGE_NAME" | grep "vgname " | awk '{print $2}')
DEVICE_PATH="/dev/$VG_NAME/$DISK_NAME"
echo "LVM VG: $VG_NAME"
elif [[ $STORAGE_TYPE == "dir" ]]; then
# Directory storage
DIR_PATH=$(cat /etc/pve/storage.cfg | grep -A5 "^dir: $STORAGE_NAME" | grep "path " | awk '{print $2}')
DEVICE_PATH="$DIR_PATH/images/$VM_ID/$DISK_NAME"
echo "Directory path: $DIR_PATH"
else
echo "❌ Unknown storage type: $STORAGE_TYPE"
exit 1
fi
echo "Device path: $DEVICE_PATH"
if [ ! -e "$DEVICE_PATH" ]; then
echo "❌ Error: Device $DEVICE_PATH not found!"
if [[ $STORAGE_TYPE == "zfspool" ]]; then
echo "Available ZFS volumes:"
zfs list -t volume | grep vm-$VM_ID
else
echo "Available LVM volumes:"
lvs | grep vm-$VM_ID
fi
exit 1
fi
# Create the mount point
MOUNT_POINT="/mnt/vm${VM_ID}_rescue"
mkdir -p "$MOUNT_POINT"
echo "📦 Setting up loop device..."
# Setup device mapper per il disco
kpartx -av "$DEVICE_PATH"
sleep 2
# Find the root partition (try several candidates)
ROOT_PART=""
# For ZFS and LVM, use kpartx
if [[ $STORAGE_TYPE == "zfspool" ]] || [[ $STORAGE_TYPE == "lvmthin" ]] || [[ $STORAGE_TYPE == "lvm" ]]; then
# Look for the mapper devices created by kpartx
DISK_BASENAME=$(basename "$DEVICE_PATH" | sed 's/-/--/g')
for PART in /dev/mapper/${DISK_BASENAME}p1 \
/dev/mapper/${DISK_BASENAME}p2 \
/dev/mapper/*vm-${VM_ID}*p1 \
/dev/mapper/*vm-${VM_ID}*p2 \
/dev/mapper/*vm--${VM_ID}*p1 \
/dev/mapper/*vm--${VM_ID}*p2; do
if [ -e "$PART" ]; then
echo "Testing partition: $PART"
if mount -o ro "$PART" "$MOUNT_POINT" 2>/dev/null; then
if [ -d "$MOUNT_POINT/root" ] && [ -d "$MOUNT_POINT/etc" ]; then
ROOT_PART="$PART"
umount "$MOUNT_POINT"
break
fi
umount "$MOUNT_POINT"
fi
fi
done
fi
if [ -z "$ROOT_PART" ]; then
echo "❌ Could not find root partition!"
echo "Available mapper devices:"
ls -la /dev/mapper/ | grep vm-${VM_ID}
kpartx -dv "$DEVICE_PATH"
exit 1
fi
echo "✓ Found root partition: $ROOT_PART"
# Mount the partition
echo "📂 Mounting filesystem..."
mount "$ROOT_PART" "$MOUNT_POINT"
echo "🔧 Fixing access..."
# Set the root password
echo "Setting root password..."
echo "root:${ROOT_PASSWORD}" | chroot "$MOUNT_POINT" chpasswd 2>/dev/null || \
echo "root:${ROOT_PASSWORD}" > "$MOUNT_POINT/tmp/setpw.txt"
# Create the SSH directory
mkdir -p "$MOUNT_POINT/root/.ssh"
# Add the SSH key
echo "Adding SSH key..."
echo "$SSH_KEY" > "$MOUNT_POINT/root/.ssh/authorized_keys"
# Set correct permissions
chmod 700 "$MOUNT_POINT/root/.ssh"
chmod 600 "$MOUNT_POINT/root/.ssh/authorized_keys"
# Fix SSH config
echo "Configuring SSH..."
if [ -f "$MOUNT_POINT/etc/ssh/sshd_config" ]; then
sed -i 's/#*PermitRootLogin.*/PermitRootLogin yes/' "$MOUNT_POINT/etc/ssh/sshd_config"
sed -i 's/#*PubkeyAuthentication.*/PubkeyAuthentication yes/' "$MOUNT_POINT/etc/ssh/sshd_config"
fi
# If the password had to be staged in a temp file, install a startup script to apply it
if [ -f "$MOUNT_POINT/tmp/setpw.txt" ]; then
cat > "$MOUNT_POINT/etc/rc.local" << 'EOFRC'
#!/bin/bash
if [ -f /tmp/setpw.txt ]; then
cat /tmp/setpw.txt | chpasswd
rm /tmp/setpw.txt
fi
exit 0
EOFRC
chmod +x "$MOUNT_POINT/etc/rc.local"
fi
echo "✓ Configuration applied"
# Sync and unmount
sync
sleep 2
echo "📤 Unmounting..."
umount "$MOUNT_POINT"
kpartx -dv "$DEVICE_PATH"
rmdir "$MOUNT_POINT"
echo "🚀 Starting VM..."
qm start $VM_ID
echo ""
echo "✅ Fix completed!"
echo ""
echo "Wait 30 seconds, then try:"
echo " ssh root@192.168.1.$((200 + VM_ID - 200))"
echo " or"
echo " qm terminal $VM_ID"
echo " Login: root"
echo " Password: $ROOT_PASSWORD"
echo ""
echo "⚠️ Remember to change the password after first login!"

scripts/provision-ha-cluster.sh Executable file

@@ -0,0 +1,320 @@
#!/bin/bash
# provision-ha-cluster.sh
# Final version with native Cloud-Init injection (cicustom) and the storage FIX
set -e
# ==================== CONFIGURATION ====================
PVE_NODE="server"
STORAGE="sddmirror" # Main storage for the VM disks (supports Images)
BRIDGE="vmbr0"
# FIX: two separate storage variables
CLOUDINIT_VOL_STORAGE="$STORAGE" # sddmirror: use the main storage, which supports Cloud-Init disk volumes (Images)
SNIPPET_STORAGE="local" # local: keep 'local' for the YAML snippet files
VM1_ID=201
VM1_NAME="ha-node1"
VM1_IP="192.168.1.201"
VM2_ID=202
VM2_NAME="ha-node2"
VM2_IP="192.168.1.202"
GATEWAY="192.168.1.1"
NETMASK="24"
DNS="8.8.8.8"
VIP="192.168.1.210"
CORES=2
MEMORY=4096
DISK_SIZE="30G"
TEMPLATE_ID=9000
UBUNTU_IMAGE_URL="https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
UBUNTU_IMAGE_NAME="ubuntu-24.04-cloudimg.img"
# IMPORTANT: put YOUR public SSH key here
SSH_PUBLIC_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOyva+cul3WOW3ct53a0QMRTkhtKvA2QpJI0p8bv48tH alex@alex-XPS-15-9570"
ROOT_PASSWORD="TempProxmox123!"
APP_DIR="/opt/myapp"
# Snippets directory. Must point at the snippets root of the 'local' storage.
SNIPPETS_DIR="/var/lib/vz/snippets"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# ==================== FUNCTIONS ====================
print_header() {
echo -e "${BLUE}================================================${NC}"
echo -e "${BLUE}$1${NC}"
echo -e "${BLUE}================================================${NC}"
}
print_success() { echo -e "${GREEN}$1${NC}"; }
print_warning() { echo -e "${YELLOW}$1${NC}"; }
print_error() { echo -e "${RED}$1${NC}"; }
print_info() { echo -e "${BLUE} $1${NC}"; }
check_command() {
if ! command -v $1 &> /dev/null; then
print_error "$1 non trovato. Installalo: apt install $1"
exit 1
fi
}
# Generate the custom user-data YAML file (attached as a snippet)
create_custom_user_data() {
local vm_name=$1
local output_file="/tmp/${vm_name}-user-data.yaml"
# Create the user-data YAML
cat > "$output_file" << EOF
#cloud-config
hostname: $vm_name
fqdn: ${vm_name}.local
manage_etc_hosts: true
users:
- name: root
ssh_authorized_keys:
- $SSH_PUBLIC_KEY
lock_passwd: false
shell: /bin/bash
chpasswd:
list: |
root:$ROOT_PASSWORD
expire: false
ssh_pwauth: true
disable_root: false
packages:
- curl
- wget
- git
- htop
- net-tools
- qemu-guest-agent
runcmd:
# Install Docker
- mkdir -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \$(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
- apt-get update
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- systemctl enable docker
- systemctl start docker
- systemctl enable qemu-guest-agent
- systemctl start qemu-guest-agent
# Configure sysctl
- echo "net.ipv4.ip_nonlocal_bind=1" >> /etc/sysctl.conf
- echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
- sysctl -p
# Create app directory
- mkdir -p $APP_DIR
- chown -R root:root $APP_DIR
# Ensure SSH is properly configured
- sed -i 's/#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
- sed -i 's/#*PubkeyAuthentication.*/PubkeyAuthentication yes/' /etc/ssh/sshd_config
- systemctl restart sshd
power_state:
mode: reboot
timeout: 300
condition: true
EOF
echo "$output_file"
}
# ==================== MAIN SCRIPT ====================
print_header "PROVISIONING HA CLUSTER ON PROXMOX v2 (native FIX)"
# Check prerequisites
print_info "Verifica prerequisiti..."
check_command "qm"
print_success "Prerequisiti OK"
# Crea la directory snippet se non esiste (root)
mkdir -p "$SNIPPETS_DIR"
# Distruggi VM esistenti se necessario
for VMID in $VM1_ID $VM2_ID $TEMPLATE_ID; do
if qm status $VMID &>/dev/null; then
print_warning "VM/Template $VMID già esistente!"
read -p "Vuoi eliminarlo e ricrearlo? (y/N) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
qm stop $VMID || true
qm destroy $VMID
# Clean up the custom files
rm -f "${SNIPPETS_DIR}/${VMID}-user-data.yaml"
print_success "VM/Template $VMID eliminato"
else
print_error "Provisioning annullato"
exit 1
fi
fi
done
# ==================== CREATE TEMPLATE ====================
print_header "STEP 1: Create the Cloud-Init template (native)"
if ! qm status $TEMPLATE_ID &>/dev/null; then
cd /tmp
if [ ! -f "$UBUNTU_IMAGE_NAME" ]; then
print_info "Download Ubuntu Cloud Image..."
wget -q --show-progress $UBUNTU_IMAGE_URL -O $UBUNTU_IMAGE_NAME
fi
print_info "Creazione template VM..."
qm create $TEMPLATE_ID --name ubuntu-cloud-template --memory $MEMORY --net0 virtio,bridge=$BRIDGE --cores $CORES
# Import the disk
qm importdisk $TEMPLATE_ID $UBUNTU_IMAGE_NAME $STORAGE &>/dev/null
qm set $TEMPLATE_ID --scsihw virtio-scsi-pci --scsi0 ${STORAGE}:vm-${TEMPLATE_ID}-disk-0
# Cloud-Init configuration:
qm set $TEMPLATE_ID --delete ide0 2>/dev/null || true
qm set $TEMPLATE_ID --delete ide2 2>/dev/null || true
# Add the cloud-init drive on the storage that supports Images (FIX)
qm set $TEMPLATE_ID --ide2 ${CLOUDINIT_VOL_STORAGE}:cloudinit,format=raw
# Set the essential options
qm set $TEMPLATE_ID --serial0 socket --vga serial0
qm set $TEMPLATE_ID --agent enabled=1
qm set $TEMPLATE_ID --boot c --bootdisk scsi0
# Resize the template disk
qm resize $TEMPLATE_ID scsi0 $DISK_SIZE &>/dev/null || true
qm template $TEMPLATE_ID
print_success "Template creato e ottimizzato per Cloud-Init nativo"
else
print_info "Template già esistente, skip"
fi
# ==================== CREATE VM1 ====================
print_header "STEP 2: Create VM1 ($VM1_NAME)"
print_info "Cloning the template..."
qm clone $TEMPLATE_ID $VM1_ID --name $VM1_NAME --full
# 1. Create the custom user-data file with your runcmd entries
USER_DATA_FILE=$(create_custom_user_data $VM1_NAME)
# 2. Create the temporary SSH key file (FIX REQUIRED for --sshkeys)
SSH_KEY_FILE="/tmp/${VM1_NAME}_id_rsa.pub"
echo "$SSH_PUBLIC_KEY" > "$SSH_KEY_FILE"
print_info "Chiave SSH salvata in $SSH_KEY_FILE per l'iniezione."
# 3. Allega il file user-data personalizzato come snippet (cicustom)
SNIPPET_FILENAME="${VM1_ID}-user-data.yaml"
# 4. Configure the VM with ALL the Cloud-Init data (FIX: SSH key passed as a file path, DNS set separately)
print_info "Injecting Cloud-Init configuration for VM1..."
qm set $VM1_ID \
--ciuser root \
--sshkeys "$SSH_KEY_FILE" \
--ipconfig0 "ip=${VM1_IP}/${NETMASK},gw=${GATEWAY}" \
--nameserver "${DNS}" \
--cicustom "user=${SNIPPET_STORAGE}:snippets/${SNIPPET_FILENAME}"
# 5. Move the user-data file into the Proxmox snippets directory (FIX: snippets root)
mv "$USER_DATA_FILE" "${SNIPPETS_DIR}/${SNIPPET_FILENAME}"
# 6. CLEANUP: remove the temporary SSH key file
rm "$SSH_KEY_FILE"
print_success "VM1 configurata e dati cloud-init iniettati"
# ==================== CREATE VM2 ====================
print_header "STEP 3: Create VM2 ($VM2_NAME)"
print_info "Cloning the template..."
qm clone $TEMPLATE_ID $VM2_ID --name $VM2_NAME --full
# 1. Create the custom user-data file with your runcmd entries
USER_DATA_FILE=$(create_custom_user_data $VM2_NAME)
# 2. Create the temporary SSH key file (FIX REQUIRED for --sshkeys)
SSH_KEY_FILE="/tmp/${VM2_NAME}_id_rsa.pub"
echo "$SSH_PUBLIC_KEY" > "$SSH_KEY_FILE"
print_info "Chiave SSH salvata in $SSH_KEY_FILE per l'iniezione."
# 3. Allega il file user-data personalizzato come snippet (cicustom)
SNIPPET_FILENAME="${VM2_ID}-user-data.yaml"
# 4. Configure the VM with ALL the Cloud-Init data (FIX: SSH key passed as a file path, DNS set separately)
print_info "Injecting Cloud-Init configuration for VM2..."
qm set $VM2_ID \
--ciuser root \
--sshkeys "$SSH_KEY_FILE" \
--ipconfig0 "ip=${VM2_IP}/${NETMASK},gw=${GATEWAY}" \
--nameserver "${DNS}" \
--cicustom "user=${SNIPPET_STORAGE}:snippets/${SNIPPET_FILENAME}"
# 5. Move the user-data file into the Proxmox snippets directory (FIX: snippets root)
mv "$USER_DATA_FILE" "${SNIPPETS_DIR}/${SNIPPET_FILENAME}"
# 6. CLEANUP: remove the temporary SSH key file
rm "$SSH_KEY_FILE"
print_success "VM2 configurata e dati cloud-init iniettati"
# ==================== AVVIA VM ====================
print_header "STEP 4: Avvio delle VM"
print_info "Avvio VM1 ($VM1_IP)..."
qm start $VM1_ID
sleep 5
print_info "Avvio VM2 ($VM2_IP)..."
qm start $VM2_ID
sleep 5
print_info "Attendo cloud-init (2-3 minuti). Il primo avvio può richiedere tempo per il resize e le runcmd."
sleep 180
# ==================== SUMMARY ====================
print_header "PROVISIONING COMPLETE! 🎉"
print_info ""
print_info "HA cluster summary:"
print_info " VM1: $VM1_NAME (ID: $VM1_ID) - ${GREEN}$VM1_IP${NC}"
print_info " VM2: $VM2_NAME (ID: $VM2_ID) - ${GREEN}$VM2_IP${NC}"
print_info " VIP: $VIP"
print_info ""
print_info "Credenziali:"
print_info " User: root"
print_info " Password: $ROOT_PASSWORD"
print_info " SSH Key: configurata"
print_info ""
print_info "Test connessione (attendere il riavvio causato da cloud-init se non funziona subito):"
print_info " ssh root@$VM1_IP"
print_info " ssh root@$VM2_IP"
print_info ""
print_success "Setup completato! Le VM ora hanno IP statico, Docker installato e chiave SSH configurata."

scripts/setup-config.sh Executable file

@@ -0,0 +1,60 @@
#!/bin/bash
# setup-config.sh
# Interactive script to configure the provisioning
echo "=== HA Cluster Configuration ==="
echo ""
# Read the configuration
read -p "Proxmox node name [pve]: " PVE_NODE
PVE_NODE=${PVE_NODE:-pve}
read -p "Storage per dischi VM [local-lvm]: " STORAGE
STORAGE=${STORAGE:-local-lvm}
read -p "Bridge di rete [vmbr0]: " BRIDGE
BRIDGE=${BRIDGE:-vmbr0}
read -p "IP VM1 [192.168.1.10]: " VM1_IP
VM1_IP=${VM1_IP:-192.168.1.10}
read -p "IP VM2 [192.168.1.11]: " VM2_IP
VM2_IP=${VM2_IP:-192.168.1.11}
read -p "Virtual IP [192.168.1.100]: " VIP
VIP=${VIP:-192.168.1.100}
read -p "Gateway [192.168.1.1]: " GATEWAY
GATEWAY=${GATEWAY:-192.168.1.1}
read -p "CPU cores per VM [4]: " CORES
CORES=${CORES:-4}
read -p "RAM per VM in MB [8192]: " MEMORY
MEMORY=${MEMORY:-8192}
read -p "Dimensione disco [50G]: " DISK_SIZE
DISK_SIZE=${DISK_SIZE:-50G}
echo ""
echo "Inserisci la tua chiave SSH pubblica:"
read SSH_PUBLIC_KEY
# Salva configurazione
cat > provision-config.env << EOF
PVE_NODE="$PVE_NODE"
STORAGE="$STORAGE"
BRIDGE="$BRIDGE"
VM1_IP="$VM1_IP"
VM2_IP="$VM2_IP"
VIP="$VIP"
GATEWAY="$GATEWAY"
CORES=$CORES
MEMORY=$MEMORY
DISK_SIZE="$DISK_SIZE"
SSH_PUBLIC_KEY="$SSH_PUBLIC_KEY"
EOF
echo ""
echo "✓ Configurazione salvata in provision-config.env"
echo "Esegui: ./provision-ha-cluster.sh"

scripts/test-failover.sh Executable file

@@ -0,0 +1,100 @@
#!/bin/bash
# test-failover.sh
# Script to test automatic failover
VIP="192.168.1.210"
VM1_IP="192.168.1.201"
VM2_IP="192.168.1.202"
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
echo "=== Test Failover HA ==="
echo ""
# Determine which node currently holds the VIP
if ssh root@$VM1_IP "ip addr show | grep -q $VIP" &>/dev/null; then
MASTER_VM=$VM1_IP
MASTER_NAME="VM1"
BACKUP_VM=$VM2_IP
BACKUP_NAME="VM2"
elif ssh root@$VM2_IP "ip addr show | grep -q $VIP" &>/dev/null; then
MASTER_VM=$VM2_IP
MASTER_NAME="VM2"
BACKUP_VM=$VM1_IP
BACKUP_NAME="VM1"
else
echo -e "${RED}Errore: nessuna VM ha il VIP!${NC}"
exit 1
fi
echo -e "${GREEN}$MASTER_NAME ($MASTER_VM) è attualmente MASTER${NC}"
echo -e "${YELLOW}$BACKUP_NAME ($BACKUP_VM) è attualmente BACKUP${NC}"
echo ""
read -p "Vuoi simulare un failure del MASTER? (y/N) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Test annullato"
exit 0
fi
echo ""
echo "1. Stato iniziale:"
echo " VIP su: $MASTER_NAME"
ping -c 1 -W 2 $VIP &>/dev/null && echo -e " ${GREEN}VIP risponde al ping${NC}" || echo -e " ${RED}VIP non risponde${NC}"
echo ""
echo "2. Simulo failure di $MASTER_NAME (stop keepalived)..."
ssh root@$MASTER_VM "docker compose -f /opt/myapp/docker-compose.yml stop keepalived"
echo ""
echo "3. Attendo failover (15 secondi)..."
sleep 15
echo ""
echo "4. Verifico nuovo MASTER:"
if ssh root@$BACKUP_VM "ip addr show | grep -q $VIP" &>/dev/null; then
echo -e " ${GREEN}$BACKUP_NAME ha preso il VIP (FAILOVER RIUSCITO!)${NC}"
else
echo -e " ${RED}$BACKUP_NAME non ha il VIP (FAILOVER FALLITO!)${NC}"
fi
echo ""
echo "5. Test connettività VIP:"
if ping -c 1 -W 2 $VIP &>/dev/null; then
echo -e " ${GREEN}✓ VIP risponde al ping${NC}"
else
echo -e " ${RED}✗ VIP non risponde${NC}"
fi
echo ""
echo "6. Test servizi FTP:"
if timeout 5 bash -c "echo quit | nc $VIP 21" &>/dev/null; then
echo -e " ${GREEN}✓ FTP risponde${NC}"
else
echo -e " ${YELLOW}⚠ FTP non risponde (potrebbe essere normale)${NC}"
fi
echo ""
read -p "Vuoi ripristinare il MASTER originale? (y/N) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo ""
echo "7. Ripristino $MASTER_NAME..."
ssh root@$MASTER_VM "docker compose -f /opt/myapp/docker-compose.yml start keepalived"
echo " Attendo 15 secondi..."
sleep 15
if ssh root@$MASTER_VM "ip addr show | grep -q $VIP" &>/dev/null; then
echo -e " ${GREEN}$MASTER_NAME ha ripreso il VIP${NC}"
else
echo -e " ${YELLOW}$BACKUP_NAME ha ancora il VIP (normale)${NC}"
fi
fi
echo ""
echo "=== Test completato ==="

scripts/verify-cluster.sh Executable file

@@ -0,0 +1,130 @@
#!/bin/bash
# verify-cluster.sh
# Script to verify the cluster state
VIP="192.168.1.210"
VM1_IP="192.168.1.201"
VM2_IP="192.168.1.202"
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
print_test() { echo -n " Testing $1... "; }
print_ok() { echo -e "${GREEN}OK${NC}"; }
print_fail() { echo -e "${RED}FAIL${NC}"; }
print_warn() { echo -e "${YELLOW}WARN${NC}"; }
echo "=== Verifica Cluster HA ==="
echo ""
# Test 1: Ping VM
echo "1. Network connectivity:"
print_test "VM1 ($VM1_IP)"
if ping -c 1 -W 2 $VM1_IP &>/dev/null; then print_ok; else print_fail; fi
print_test "VM2 ($VM2_IP)"
if ping -c 1 -W 2 $VM2_IP &>/dev/null; then print_ok; else print_fail; fi
print_test "VIP ($VIP)"
if ping -c 1 -W 2 $VIP &>/dev/null; then print_ok; else print_fail; fi
# Test 2: SSH
echo ""
echo "2. SSH connectivity:"
print_test "VM1 SSH"
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@$VM1_IP "echo ok" &>/dev/null; then
print_ok
else
print_fail
fi
print_test "VM2 SSH"
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@$VM2_IP "echo ok" &>/dev/null; then
print_ok
else
print_fail
fi
# Test 3: Docker
echo ""
echo "3. Docker status:"
print_test "VM1 Docker"
if ssh -o ConnectTimeout=5 root@$VM1_IP "docker ps" &>/dev/null; then
print_ok
else
print_fail
fi
print_test "VM2 Docker"
if ssh -o ConnectTimeout=5 root@$VM2_IP "docker ps" &>/dev/null; then
print_ok
else
print_fail
fi
# Test 4: Services
echo ""
echo "4. Services:"
check_service() {
local vm_ip=$1
local service=$2
if ssh -o ConnectTimeout=5 root@$vm_ip "docker ps | grep -q $service" &>/dev/null; then
return 0
else
return 1
fi
}
print_test "MySQL on VM1"
if check_service $VM1_IP mysql; then print_ok; else print_warn; fi
print_test "Redis Master on VM1"
if check_service $VM1_IP redis-master; then print_ok; else print_warn; fi
print_test "Redis Slave on VM2"
if check_service $VM2_IP redis-slave; then print_ok; else print_warn; fi
print_test "FTP Server 1"
if check_service $VM1_IP ftp-server-1; then print_ok; else print_warn; fi
print_test "FTP Server 2"
if check_service $VM2_IP ftp-server-2; then print_ok; else print_warn; fi
print_test "HAProxy on VM1"
if check_service $VM1_IP haproxy; then print_ok; else print_warn; fi
print_test "Keepalived on VM1"
if check_service $VM1_IP keepalived; then print_ok; else print_warn; fi
# Test 5: VIP ownership
echo ""
echo "5. Virtual IP ownership:"
if ssh -o ConnectTimeout=5 root@$VM1_IP "ip addr show | grep -q $VIP" &>/dev/null; then
echo -e " ${GREEN}VM1 is MASTER${NC} (has VIP)"
elif ssh -o ConnectTimeout=5 root@$VM2_IP "ip addr show | grep -q $VIP" &>/dev/null; then
echo -e " ${YELLOW}VM2 is MASTER${NC} (has VIP)"
else
echo -e " ${RED}No VM has VIP!${NC}"
fi
# Test 6: Service endpoints
echo ""
echo "6. Service endpoints:"
print_test "FTP port 21"
if nc -zv -w 2 $VIP 21 &>/dev/null; then print_ok; else print_fail; fi
print_test "HAProxy stats 8404"
if nc -zv -w 2 $VIP 8404 &>/dev/null; then print_ok; else print_fail; fi
print_test "MySQL port 3306"
if nc -zv -w 2 $VIP 3306 &>/dev/null; then print_ok; else print_fail; fi
print_test "Redis port 6379"
if nc -zv -w 2 $VIP 6379 &>/dev/null; then print_ok; else print_fail; fi
echo ""
echo "=== Verifica completata ==="