initial working

commit c850cc6e7e
2025-10-31 21:00:14 +01:00
212 changed files with 24622 additions and 0 deletions

scripts/provision-ha-cluster.sh (Executable file, 320 lines)

@@ -0,0 +1,320 @@
#!/bin/bash
# provision-ha-cluster.sh
# Final version with native Cloud-Init injection (cicustom) and the storage FIX
set -e
# ==================== CONFIGURATION ====================
PVE_NODE="server"
STORAGE="sddmirror" # Main storage for the VM disks (supports the Images content type)
BRIDGE="vmbr0"
# FIX: two separate variables for storage
CLOUDINIT_VOL_STORAGE="$STORAGE" # sddmirror: use the main storage, which supports Cloud-Init disk volumes (Images)
SNIPPET_STORAGE="local" # local: keep 'local' for the YAML snippet files
VM1_ID=201
VM1_NAME="ha-node1"
VM1_IP="192.168.1.201"
VM2_ID=202
VM2_NAME="ha-node2"
VM2_IP="192.168.1.202"
GATEWAY="192.168.1.1"
NETMASK="24"
DNS="8.8.8.8"
VIP="192.168.1.210"
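# The VIP is only reported by this script; it is expected to be claimed by an
# HA mechanism (e.g. keepalived) configured inside the VMs afterwards.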
CORES=2
MEMORY=4096
DISK_SIZE="30G"
TEMPLATE_ID=9000
UBUNTU_IMAGE_URL="https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
UBUNTU_IMAGE_NAME="ubuntu-24.04-cloudimg.img"
# IMPORTANT: insert YOUR public SSH key here
SSH_PUBLIC_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOyva+cul3WOW3ct53a0QMRTkhtKvA2QpJI0p8bv48tH alex@alex-XPS-15-9570"
ROOT_PASSWORD="TempProxmox123!"
APP_DIR="/opt/myapp"
# Directory for the snippets. It must point to the snippets root of the 'local' storage.
SNIPPETS_DIR="/var/lib/vz/snippets"
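# NOTE: for cicustom to find the snippet, the 'local' storage must have the
# 'snippets' content type enabled, e.g. (keeping your existing content types):
#   pvesm set local --content iso,vztmpl,backup,snippets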
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# ==================== FUNCTIONS ====================
print_header() {
    echo -e "${BLUE}================================================${NC}"
    echo -e "${BLUE}$1${NC}"
    echo -e "${BLUE}================================================${NC}"
}
print_success() { echo -e "${GREEN}$1${NC}"; }
print_warning() { echo -e "${YELLOW}$1${NC}"; }
print_error() { echo -e "${RED}$1${NC}"; }
print_info() { echo -e "${BLUE} $1${NC}"; }
check_command() {
    if ! command -v "$1" &> /dev/null; then
        print_error "$1 not found. Install it with: apt install $1"
        exit 1
    fi
}
# Generate the custom user-data YAML file (attached later as a snippet)
create_custom_user_data() {
    local vm_name=$1
    local output_file="/tmp/${vm_name}-user-data.yaml"
    # Write the user-data YAML
    cat > "$output_file" << EOF
#cloud-config
hostname: $vm_name
fqdn: ${vm_name}.local
manage_etc_hosts: true
users:
  - name: root
    ssh_authorized_keys:
      - $SSH_PUBLIC_KEY
    lock_passwd: false
    shell: /bin/bash
chpasswd:
  list: |
    root:$ROOT_PASSWORD
  expire: false
ssh_pwauth: true
disable_root: false
packages:
  - curl
  - wget
  - git
  - htop
  - net-tools
  - qemu-guest-agent
runcmd:
  # Install Docker
  - mkdir -p /etc/apt/keyrings
  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  - echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \$(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
  - apt-get update
  - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
  - systemctl enable docker
  - systemctl start docker
  - systemctl enable qemu-guest-agent
  - systemctl start qemu-guest-agent
  # Configure sysctl
  - echo "net.ipv4.ip_nonlocal_bind=1" >> /etc/sysctl.conf
  - echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
  - sysctl -p
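  # NOTE: ip_nonlocal_bind lets a service bind the shared VIP even while the
  # address is not yet assigned to this node (the typical keepalived/HAProxy
  # pattern); ip_forward is enabled for routed/NAT traffic from the containers.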
  # Create app directory
  - mkdir -p $APP_DIR
  - chown -R root:root $APP_DIR
  # Ensure SSH is properly configured
  - sed -i 's/#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
  - sed -i 's/#*PubkeyAuthentication.*/PubkeyAuthentication yes/' /etc/ssh/sshd_config
  - systemctl restart sshd
power_state:
  mode: reboot
  timeout: 300
  condition: true
EOF
    echo "$output_file"
}
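# A generated snippet can be sanity-checked before use, assuming the cloud-init
# CLI is available on the host (it usually is not on a stock Proxmox node), e.g.:
#   cloud-init schema --config-file /tmp/ha-node1-user-data.yaml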
# ==================== MAIN SCRIPT ====================
print_header "PROVISIONING HA CLUSTER ON PROXMOX v2 (Native FIX)"
# Check prerequisites
print_info "Checking prerequisites..."
check_command "qm"
print_success "Prerequisites OK"
# Create the snippets directory if it does not exist (root of the 'local' storage)
mkdir -p "$SNIPPETS_DIR"
# Destroy existing VMs if necessary
for VMID in $VM1_ID $VM2_ID $TEMPLATE_ID; do
    if qm status $VMID &>/dev/null; then
        print_warning "VM/template $VMID already exists!"
        read -p "Delete and recreate it? (y/N) " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            qm stop $VMID || true
            qm destroy $VMID
            # Clean up the custom files
            rm -f "${SNIPPETS_DIR}/${VMID}-user-data.yaml"
            print_success "VM/template $VMID deleted"
        else
            print_error "Provisioning aborted"
            exit 1
        fi
    fi
done
# ==================== CREATE TEMPLATE ====================
print_header "STEP 1: Creating the Cloud-Init template (native)"
if ! qm status $TEMPLATE_ID &>/dev/null; then
    cd /tmp
    if [ ! -f "$UBUNTU_IMAGE_NAME" ]; then
        print_info "Downloading the Ubuntu cloud image..."
        wget -q --show-progress $UBUNTU_IMAGE_URL -O $UBUNTU_IMAGE_NAME
    fi
    print_info "Creating the template VM..."
    qm create $TEMPLATE_ID --name ubuntu-cloud-template --memory $MEMORY --net0 virtio,bridge=$BRIDGE --cores $CORES
    # Import the disk
    qm importdisk $TEMPLATE_ID $UBUNTU_IMAGE_NAME $STORAGE &>/dev/null
    qm set $TEMPLATE_ID --scsihw virtio-scsi-pci --scsi0 ${STORAGE}:vm-${TEMPLATE_ID}-disk-0
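    # NOTE: the "vm-<id>-disk-0" volume name above assumes a block-based storage
    # (e.g. ZFS or LVM-thin); on a directory storage the imported volume is
    # typically named "<id>/vm-<id>-disk-0.raw", so adjust the reference if needed.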
    # Cloud-Init configuration:
    qm set $TEMPLATE_ID --delete ide0 2>/dev/null || true
    qm set $TEMPLATE_ID --delete ide2 2>/dev/null || true
    # Add the cloud-init drive on a volume that supports the Images content type (FIX)
    qm set $TEMPLATE_ID --ide2 ${CLOUDINIT_VOL_STORAGE}:cloudinit,format=raw
    # Essential settings
    qm set $TEMPLATE_ID --serial0 socket --vga serial0
    qm set $TEMPLATE_ID --agent enabled=1
    qm set $TEMPLATE_ID --boot c --bootdisk scsi0
    # Resize the template disk
    qm resize $TEMPLATE_ID scsi0 $DISK_SIZE &>/dev/null || true
    qm template $TEMPLATE_ID
    print_success "Template created and optimized for native Cloud-Init"
else
    print_info "Template already exists, skipping"
fi
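# The resulting template configuration can be reviewed with: qm config $TEMPLATE_ID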
# ==================== CREATE VM1 ====================
print_header "STEP 2: Creating VM1 ($VM1_NAME)"
print_info "Cloning the template..."
qm clone $TEMPLATE_ID $VM1_ID --name $VM1_NAME --full
# 1. Create the custom user-data file with the runcmd steps
USER_DATA_FILE=$(create_custom_user_data $VM1_NAME)
# 2. Create the temporary SSH key file (REQUIRED FIX for --sshkeys)
SSH_KEY_FILE="/tmp/${VM1_NAME}_id_rsa.pub"
echo "$SSH_PUBLIC_KEY" > "$SSH_KEY_FILE"
print_info "SSH key saved to $SSH_KEY_FILE for injection."
# 3. Attach the custom user-data file as a snippet (cicustom)
SNIPPET_FILENAME="${VM1_ID}-user-data.yaml"
# 4. Configure the VM with ALL the Cloud-Init data (FIX: use the SSH key file path and a separate DNS option)
print_info "Injecting the Cloud-Init configuration for VM1..."
qm set $VM1_ID \
    --ciuser root \
    --sshkeys "$SSH_KEY_FILE" \
    --ipconfig0 "ip=${VM1_IP}/${NETMASK},gw=${GATEWAY}" \
    --nameserver "${DNS}" \
    --cicustom "user=${SNIPPET_STORAGE}:snippets/${SNIPPET_FILENAME}"
# 5. Move the user-data file into the Proxmox snippets directory (FIX: snippets at the storage root)
mv "$USER_DATA_FILE" "${SNIPPETS_DIR}/${SNIPPET_FILENAME}"
# 6. CLEANUP: remove the temporary SSH key file
rm "$SSH_KEY_FILE"
print_success "VM1 configured and cloud-init data injected"
# ==================== CREATE VM2 ====================
print_header "STEP 3: Creating VM2 ($VM2_NAME)"
print_info "Cloning the template..."
qm clone $TEMPLATE_ID $VM2_ID --name $VM2_NAME --full
# 1. Create the custom user-data file with the runcmd steps
USER_DATA_FILE=$(create_custom_user_data $VM2_NAME)
# 2. Create the temporary SSH key file (REQUIRED FIX for --sshkeys)
SSH_KEY_FILE="/tmp/${VM2_NAME}_id_rsa.pub"
echo "$SSH_PUBLIC_KEY" > "$SSH_KEY_FILE"
print_info "SSH key saved to $SSH_KEY_FILE for injection."
# 3. Attach the custom user-data file as a snippet (cicustom)
SNIPPET_FILENAME="${VM2_ID}-user-data.yaml"
# 4. Configure the VM with ALL the Cloud-Init data (FIX: use the SSH key file path and a separate DNS option)
print_info "Injecting the Cloud-Init configuration for VM2..."
qm set $VM2_ID \
    --ciuser root \
    --sshkeys "$SSH_KEY_FILE" \
    --ipconfig0 "ip=${VM2_IP}/${NETMASK},gw=${GATEWAY}" \
    --nameserver "${DNS}" \
    --cicustom "user=${SNIPPET_STORAGE}:snippets/${SNIPPET_FILENAME}"
# 5. Move the user-data file into the Proxmox snippets directory (FIX: snippets at the storage root)
mv "$USER_DATA_FILE" "${SNIPPETS_DIR}/${SNIPPET_FILENAME}"
# 6. CLEANUP: remove the temporary SSH key file
rm "$SSH_KEY_FILE"
print_success "VM2 configured and cloud-init data injected"
# ==================== START THE VMs ====================
print_header "STEP 4: Starting the VMs"
print_info "Starting VM1 ($VM1_IP)..."
qm start $VM1_ID
sleep 5
print_info "Starting VM2 ($VM2_IP)..."
qm start $VM2_ID
sleep 5
print_info "Waiting for cloud-init (2-3 minutes). The first boot can take a while because of the disk resize and the runcmd steps."
sleep 180
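# Optional alternative to the fixed wait: poll the QEMU guest agent until it
# answers (requires qemu-guest-agent to be running inside the guests), e.g.:
#   until qm agent $VM1_ID ping &>/dev/null; do sleep 10; done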
# ==================== SUMMARY ====================
print_header "PROVISIONING COMPLETE! 🎉"
print_info ""
print_info "HA cluster summary:"
print_info "  VM1: $VM1_NAME (ID: $VM1_ID) - ${GREEN}$VM1_IP${NC}"
print_info "  VM2: $VM2_NAME (ID: $VM2_ID) - ${GREEN}$VM2_IP${NC}"
print_info "  VIP: $VIP"
print_info ""
print_info "Credentials:"
print_info "  User: root"
print_info "  Password: $ROOT_PASSWORD"
print_info "  SSH key: configured"
print_info ""
print_info "Connection test (if it does not work right away, wait for the reboot triggered by cloud-init):"
print_info "  ssh root@$VM1_IP"
print_info "  ssh root@$VM2_IP"
print_info ""
print_success "Setup complete! The VMs now have static IPs, Docker installed and the SSH key configured."