Compare commits
18 Commits
572af82852
...
e1eb2d137a
| Author | SHA1 | Date | |
|---|---|---|---|
| e1eb2d137a | |||
| b1962eae27 | |||
| 38e1f9e86a | |||
| 9b647515ee | |||
| 43f8b7c8cb | |||
|
|
eef01d87d0 | ||
| e897b1a027 | |||
| 95a301ae3f | |||
|
|
c017ec6941 | ||
|
|
9e7ef8159b | ||
|
|
3828e04b13 | ||
|
|
d6655babd9 | ||
|
|
dc94395bbc | ||
|
|
699aaefac3 | ||
|
|
277a22d962 | ||
|
|
83a5d988af | ||
|
|
a45ee496e4 | ||
|
|
e54ecfefc1 |
@ -4,15 +4,19 @@
|
||||
exclude_paths:
|
||||
- .cache/
|
||||
- .github/
|
||||
- .gitea/
|
||||
- .ansible/
|
||||
|
||||
# Skip specific rules
|
||||
skip_list:
|
||||
- yaml[line-length] # Allow longer lines in some cases
|
||||
- yaml[document-start] # Allow missing document start in vault files
|
||||
- yaml[truthy] # Allow different truthy values in workflow files
|
||||
- name[casing] # Allow mixed case in task names
|
||||
- args[module] # Skip args rule that causes "file name too long" issues
|
||||
- var-naming[no-role-prefix] # Allow shorter variable names for readability
|
||||
- risky-shell-pipe # Allow shell pipes in maintenance scripts
|
||||
- run-once[play] # Allow strategy: free for parallel execution
|
||||
|
||||
# Warn instead of error for these
|
||||
warn_list:
|
||||
|
||||
@ -5,10 +5,67 @@ on:
|
||||
push:
|
||||
branches: [master]
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
|
||||
jobs:
|
||||
lint-and-test:
|
||||
# Check if CI should be skipped based on branch name or commit message
|
||||
# Simple skip pattern: @skipci (case-insensitive)
|
||||
skip-ci-check:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should-skip: ${{ steps.check.outputs.skip }}
|
||||
steps:
|
||||
- name: Check out code (for commit message)
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Check if CI should be skipped
|
||||
id: check
|
||||
run: |
|
||||
# Simple skip pattern: @skipci (case-insensitive)
|
||||
# Works in branch names and commit messages
|
||||
SKIP_PATTERN="@skipci"
|
||||
|
||||
# Get branch name (works for both push and PR)
|
||||
BRANCH_NAME="${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}"
|
||||
|
||||
# Get commit message (works for both push and PR)
|
||||
COMMIT_MSG="${GITHUB_EVENT_HEAD_COMMIT_MESSAGE:-}"
|
||||
if [ -z "$COMMIT_MSG" ]; then
|
||||
COMMIT_MSG="${GITHUB_EVENT_PULL_REQUEST_HEAD_COMMIT_MESSAGE:-}"
|
||||
fi
|
||||
if [ -z "$COMMIT_MSG" ]; then
|
||||
COMMIT_MSG=$(git log -1 --pretty=%B 2>/dev/null || echo "")
|
||||
fi
|
||||
|
||||
SKIP=0
|
||||
|
||||
# Check branch name (case-insensitive)
|
||||
if echo "$BRANCH_NAME" | grep -qiF "$SKIP_PATTERN"; then
|
||||
echo "Skipping CI: branch name contains '$SKIP_PATTERN'"
|
||||
SKIP=1
|
||||
fi
|
||||
|
||||
# Check commit message (case-insensitive)
|
||||
if [ $SKIP -eq 0 ] && [ -n "$COMMIT_MSG" ]; then
|
||||
if echo "$COMMIT_MSG" | grep -qiF "$SKIP_PATTERN"; then
|
||||
echo "Skipping CI: commit message contains '$SKIP_PATTERN'"
|
||||
SKIP=1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "skip=$SKIP" >> $GITHUB_OUTPUT
|
||||
echo "Branch: $BRANCH_NAME"
|
||||
echo "Commit: ${COMMIT_MSG:0:50}..."
|
||||
echo "Skip CI: $SKIP"
|
||||
|
||||
lint-and-test:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
# Skip push events for non-master branches (they'll be covered by PR events)
|
||||
if: github.event_name == 'pull_request' || github.ref == 'refs/heads/master'
|
||||
container:
|
||||
image: node:20-bullseye
|
||||
steps:
|
||||
@ -26,13 +83,17 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
ansible-validation:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
# Skip push events for non-master branches (they'll be covered by PR events)
|
||||
if: github.event_name == 'pull_request' || github.ref == 'refs/heads/master'
|
||||
container:
|
||||
image: ubuntu:22.04
|
||||
steps:
|
||||
- name: Install Node.js for checkout action
|
||||
run: |
|
||||
apt-get update && apt-get install -y curl
|
||||
apt-get update && apt-get install -y curl git
|
||||
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
|
||||
apt-get install -y nodejs
|
||||
|
||||
@ -60,6 +121,8 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
secret-scanning:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: zricethezav/gitleaks:latest
|
||||
@ -78,6 +141,8 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
dependency-scan:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: aquasec/trivy:latest
|
||||
@ -93,6 +158,8 @@ jobs:
|
||||
run: trivy fs --scanners vuln,secret --exit-code 0 .
|
||||
|
||||
sast-scan:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ubuntu:22.04
|
||||
@ -116,6 +183,8 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
license-check:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: node:20-bullseye
|
||||
@ -136,6 +205,8 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
vault-check:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ubuntu:22.04
|
||||
@ -159,7 +230,7 @@ jobs:
|
||||
- name: Validate vault files are encrypted
|
||||
run: |
|
||||
echo "Checking for Ansible Vault files..."
|
||||
vault_files=$(find . -name "*vault*.yml" -o -name "*vault*.yaml" | grep -v ".git" || true)
|
||||
vault_files=$(find . -name "*vault*.yml" -o -name "*vault*.yaml" | grep -v ".git" | grep -v ".example" || true)
|
||||
if [ -z "$vault_files" ]; then
|
||||
echo "No vault files found"
|
||||
exit 0
|
||||
@ -182,6 +253,8 @@ jobs:
|
||||
echo "All vault files are properly encrypted!"
|
||||
|
||||
playbook-test:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ubuntu:22.04
|
||||
@ -218,12 +291,17 @@ jobs:
|
||||
fi
|
||||
done
|
||||
if [ $failed -eq 1 ]; then
|
||||
echo "Some playbooks have errors (this is expected without inventory/vault)"
|
||||
exit 0
|
||||
echo "❌ Some playbooks have syntax errors!"
|
||||
echo "Note: This may be expected if playbooks require inventory/vault, but syntax errors should still be fixed."
|
||||
exit 1
|
||||
else
|
||||
echo "✅ All playbooks passed syntax check"
|
||||
fi
|
||||
continue-on-error: true
|
||||
|
||||
container-scan:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ubuntu:22.04
|
||||
@ -239,22 +317,43 @@ jobs:
|
||||
|
||||
- name: Install Trivy
|
||||
run: |
|
||||
set -e
|
||||
apt-get update && apt-get install -y wget curl tar
|
||||
# Try multiple download methods for reliability
|
||||
echo "Downloading Trivy..."
|
||||
if wget -q "https://github.com/aquasecurity/trivy/releases/latest/download/trivy_linux_amd64.tar.gz" -O /tmp/trivy.tar.gz 2>&1; then
|
||||
echo "Downloaded tar.gz, extracting..."
|
||||
tar -xzf /tmp/trivy.tar.gz -C /tmp/ trivy
|
||||
mv /tmp/trivy /usr/local/bin/trivy
|
||||
elif wget -q "https://github.com/aquasecurity/trivy/releases/latest/download/trivy_linux_amd64" -O /usr/local/bin/trivy 2>&1; then
|
||||
echo "Downloaded binary directly"
|
||||
else
|
||||
echo "Failed to download Trivy, trying with version detection..."
|
||||
TRIVY_VERSION=$(curl -s https://api.github.com/repos/aquasecurity/trivy/releases/latest | grep tag_name | cut -d '"' -f 4 | sed 's/v//')
|
||||
wget -q "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz" -O /tmp/trivy.tar.gz
|
||||
tar -xzf /tmp/trivy.tar.gz -C /tmp/ trivy
|
||||
mv /tmp/trivy /usr/local/bin/trivy
|
||||
|
||||
# Use a fixed, known-good Trivy version to avoid URL/redirect issues
|
||||
TRIVY_VERSION="0.58.2"
|
||||
TRIVY_URL="https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz"
|
||||
|
||||
echo "Installing Trivy version: ${TRIVY_VERSION}"
|
||||
echo "Downloading from: ${TRIVY_URL}"
|
||||
|
||||
if ! wget --progress=bar:force "${TRIVY_URL}" -O /tmp/trivy.tar.gz 2>&1; then
|
||||
echo "❌ Failed to download Trivy archive"
|
||||
echo "Checking if file was partially downloaded:"
|
||||
ls -lh /tmp/trivy.tar.gz 2>/dev/null || echo "No file found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f /tmp/trivy.tar.gz ] || [ ! -s /tmp/trivy.tar.gz ]; then
|
||||
echo "❌ Downloaded Trivy archive is missing or empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Download complete. File size: $(du -h /tmp/trivy.tar.gz | cut -f1)"
|
||||
echo "Extracting Trivy..."
|
||||
if ! tar -xzf /tmp/trivy.tar.gz -C /tmp/ trivy; then
|
||||
echo "❌ Failed to extract Trivy binary from archive"
|
||||
tar -tzf /tmp/trivy.tar.gz 2>&1 | head -20 || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f /tmp/trivy ]; then
|
||||
echo "❌ Trivy binary not found after extraction"
|
||||
ls -la /tmp/ | grep trivy || ls -la /tmp/ | head -20
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mv /tmp/trivy /usr/local/bin/trivy
|
||||
chmod +x /usr/local/bin/trivy
|
||||
/usr/local/bin/trivy --version
|
||||
trivy --version
|
||||
@ -273,27 +372,139 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
sonar-analysis:
|
||||
needs: skip-ci-check
|
||||
if: needs.skip-ci-check.outputs.should-skip != '1'
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: sonarsource/sonar-scanner-cli:latest
|
||||
image: ubuntu:22.04
|
||||
env:
|
||||
SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }}
|
||||
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
|
||||
steps:
|
||||
- name: Install Node.js for checkout action
|
||||
run: |
|
||||
apk add --no-cache nodejs npm curl
|
||||
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Java and SonarScanner
|
||||
run: |
|
||||
set -e
|
||||
apt-get update && apt-get install -y wget curl unzip openjdk-21-jre
|
||||
|
||||
# Use a known working version to avoid download issues
|
||||
SONAR_SCANNER_VERSION="5.0.1.3006"
|
||||
SCANNER_URL="https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SONAR_SCANNER_VERSION}-linux.zip"
|
||||
|
||||
echo "Installing SonarScanner version: ${SONAR_SCANNER_VERSION}"
|
||||
echo "Downloading from: ${SCANNER_URL}"
|
||||
|
||||
# Download with verbose error output
|
||||
if ! wget --progress=bar:force "${SCANNER_URL}" -O /tmp/sonar-scanner.zip 2>&1; then
|
||||
echo "❌ Failed to download SonarScanner"
|
||||
echo "Checking if file was partially downloaded:"
|
||||
ls -lh /tmp/sonar-scanner.zip 2>/dev/null || echo "No file found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Verify download
|
||||
if [ ! -f /tmp/sonar-scanner.zip ] || [ ! -s /tmp/sonar-scanner.zip ]; then
|
||||
echo "❌ Downloaded file is missing or empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Download complete. File size: $(du -h /tmp/sonar-scanner.zip | cut -f1)"
|
||||
|
||||
echo "Extracting SonarScanner..."
|
||||
if ! unzip -q /tmp/sonar-scanner.zip -d /tmp; then
|
||||
echo "❌ Failed to extract SonarScanner"
|
||||
echo "Archive info:"
|
||||
file /tmp/sonar-scanner.zip || true
|
||||
unzip -l /tmp/sonar-scanner.zip 2>&1 | head -20 || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Find the extracted directory (handle both naming conventions)
|
||||
EXTRACTED_DIR=""
|
||||
if [ -d "/tmp/sonar-scanner-${SONAR_SCANNER_VERSION}-linux" ]; then
|
||||
EXTRACTED_DIR="/tmp/sonar-scanner-${SONAR_SCANNER_VERSION}-linux"
|
||||
elif [ -d "/tmp/sonar-scanner-cli-${SONAR_SCANNER_VERSION}-linux" ]; then
|
||||
EXTRACTED_DIR="/tmp/sonar-scanner-cli-${SONAR_SCANNER_VERSION}-linux"
|
||||
else
|
||||
# Try to find any sonar-scanner directory
|
||||
EXTRACTED_DIR=$(find /tmp -maxdepth 1 -type d -name "*sonar-scanner*" | head -1)
|
||||
fi
|
||||
|
||||
if [ -z "$EXTRACTED_DIR" ] || [ ! -d "$EXTRACTED_DIR" ]; then
|
||||
echo "❌ SonarScanner directory not found after extraction"
|
||||
echo "Contents of /tmp:"
|
||||
ls -la /tmp/ | grep -E "(sonar|zip)" || ls -la /tmp/ | head -20
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Found extracted directory: ${EXTRACTED_DIR}"
|
||||
mv "${EXTRACTED_DIR}" /opt/sonar-scanner
|
||||
|
||||
# Create symlink
|
||||
if [ -f /opt/sonar-scanner/bin/sonar-scanner ]; then
|
||||
ln -sf /opt/sonar-scanner/bin/sonar-scanner /usr/local/bin/sonar-scanner
|
||||
chmod +x /opt/sonar-scanner/bin/sonar-scanner
|
||||
chmod +x /usr/local/bin/sonar-scanner
|
||||
else
|
||||
echo "❌ sonar-scanner binary not found in /opt/sonar-scanner/bin/"
|
||||
echo "Contents of /opt/sonar-scanner/bin/:"
|
||||
ls -la /opt/sonar-scanner/bin/ || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Verifying installation..."
|
||||
if ! sonar-scanner --version; then
|
||||
echo "❌ SonarScanner verification failed"
|
||||
echo "PATH: $PATH"
|
||||
which sonar-scanner || echo "sonar-scanner not in PATH"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ SonarScanner installed successfully"
|
||||
|
||||
- name: Verify SonarQube connection
|
||||
run: |
|
||||
echo "Checking SonarQube connectivity..."
|
||||
if [ -z "$SONAR_HOST_URL" ] || [ -z "$SONAR_TOKEN" ]; then
|
||||
echo "❌ ERROR: SONAR_HOST_URL or SONAR_TOKEN secrets are not set!"
|
||||
echo "Please configure them in: Repository Settings → Actions → Secrets"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Secrets are configured"
|
||||
echo "SonarQube URL: ${SONAR_HOST_URL}"
|
||||
echo "Testing connectivity to SonarQube server..."
|
||||
if curl -f -s -o /dev/null -w "%{http_code}" "${SONAR_HOST_URL}/api/system/status" | grep -q "200"; then
|
||||
echo "✓ SonarQube server is reachable"
|
||||
else
|
||||
echo "⚠️ Warning: Could not verify SonarQube server connectivity"
|
||||
fi
|
||||
|
||||
- name: Run SonarScanner
|
||||
run: |
|
||||
sonar-scanner \
|
||||
-Dsonar.projectKey=ansible-infra \
|
||||
echo "Starting SonarQube analysis..."
|
||||
if ! sonar-scanner \
|
||||
-Dsonar.projectKey=ansible \
|
||||
-Dsonar.sources=. \
|
||||
-Dsonar.host.url=${SONAR_HOST_URL} \
|
||||
-Dsonar.login=${SONAR_TOKEN}
|
||||
-Dsonar.token=${SONAR_TOKEN} \
|
||||
-Dsonar.scm.disabled=true \
|
||||
-Dsonar.python.version=3.10 \
|
||||
-X; then
|
||||
echo ""
|
||||
echo "❌ SonarScanner analysis failed!"
|
||||
echo ""
|
||||
echo "Common issues:"
|
||||
echo " 1. Project 'ansible' doesn't exist in SonarQube"
|
||||
echo " → Create it manually in SonarQube UI"
|
||||
echo " 2. Token doesn't have permission to analyze/create project"
|
||||
echo " → Ensure token has 'Execute Analysis' permission"
|
||||
echo " 3. Token doesn't have 'Create Projects' permission (if project doesn't exist)"
|
||||
echo " → Grant this permission in SonarQube user settings"
|
||||
echo ""
|
||||
echo "Check SonarQube logs for more details."
|
||||
exit 1
|
||||
fi
|
||||
continue-on-error: true
|
||||
|
||||
workflow-summary:
|
||||
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@ -6,6 +6,16 @@
|
||||
*.tmp
|
||||
*.bak
|
||||
*~
|
||||
vault.yml.bak.*
|
||||
|
||||
# Deploy keys and SSH private keys - NEVER commit these!
|
||||
*_deploy_key
|
||||
*_deploy_key.pub
|
||||
*.pem
|
||||
*.key
|
||||
id_rsa
|
||||
id_ed25519
|
||||
id_ecdsa
|
||||
|
||||
# Python bytecode
|
||||
__pycache__/
|
||||
|
||||
129
Makefile
129
Makefile
@ -16,6 +16,7 @@ PLAYBOOK_LOCAL := playbooks/local.yml
|
||||
PLAYBOOK_MAINTENANCE := playbooks/maintenance.yml
|
||||
PLAYBOOK_TAILSCALE := playbooks/tailscale.yml
|
||||
PLAYBOOK_PROXMOX := playbooks/infrastructure/proxmox-vm.yml
|
||||
PLAYBOOK_PROXMOX_INFO := playbooks/app/proxmox_info.yml
|
||||
|
||||
# Collection and requirement paths
|
||||
COLLECTIONS_REQ := collections/requirements.yml
|
||||
@ -152,6 +153,18 @@ test-syntax: ## Run comprehensive syntax and validation checks
|
||||
fi; \
|
||||
done
|
||||
@echo ""
|
||||
@echo "$(YELLOW)App Project Playbooks:$(RESET)"
|
||||
@for playbook in playbooks/app/site.yml playbooks/app/provision_vms.yml playbooks/app/configure_app.yml playbooks/app/ssh_client_config.yml; do \
|
||||
if [ -f "$$playbook" ]; then \
|
||||
printf " %-25s " "$$playbook"; \
|
||||
if ansible-playbook "$$playbook" --syntax-check >/dev/null 2>&1; then \
|
||||
echo "$(GREEN)✓ OK$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)✗ FAIL$(RESET)"; \
|
||||
fi; \
|
||||
fi; \
|
||||
done
|
||||
@echo ""
|
||||
@echo "$(YELLOW)Role Test Playbooks:$(RESET)"
|
||||
@for test_playbook in roles/*/tests/test.yml; do \
|
||||
if [ -f "$$test_playbook" ]; then \
|
||||
@ -195,10 +208,14 @@ test-syntax: ## Run comprehensive syntax and validation checks
|
||||
@for yaml_file in inventories/production/group_vars/all/main.yml; do \
|
||||
if [ -f "$$yaml_file" ]; then \
|
||||
printf " %-25s " "$$yaml_file (YAML)"; \
|
||||
if python3 -c "import yaml; yaml.safe_load(open('$$yaml_file'))" >/dev/null 2>&1; then \
|
||||
echo "$(GREEN)✓ OK$(RESET)"; \
|
||||
if python3 -c "import yaml" >/dev/null 2>&1; then \
|
||||
if python3 -c "import yaml; yaml.safe_load(open('$$yaml_file'))" >/dev/null 2>&1; then \
|
||||
echo "$(GREEN)✓ OK$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)✗ FAIL$(RESET)"; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(RED)✗ FAIL$(RESET)"; \
|
||||
echo "$(YELLOW)⚠ Skipped (PyYAML not installed)$(RESET)"; \
|
||||
fi; \
|
||||
fi; \
|
||||
done
|
||||
@ -235,14 +252,22 @@ local: ## Run the local playbook on localhost
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_LOCAL) -K
|
||||
|
||||
# Host-specific targets
|
||||
dev: ## Run on specific host (usage: make dev HOST=dev01)
|
||||
dev: ## Run on specific host (usage: make dev HOST=dev01 [SUDO=true] [SSH_PASS=true])
|
||||
ifndef HOST
|
||||
@echo "$(RED)Error: HOST parameter required$(RESET)"
|
||||
@echo "Usage: make dev HOST=dev01"
|
||||
@echo "Usage: make dev HOST=dev01 [SUDO=true] [SSH_PASS=true]"
|
||||
@exit 1
|
||||
endif
|
||||
@echo "$(YELLOW)Running on host: $(HOST)$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_DEV) --limit $(HOST)
|
||||
@SSH_FLAGS=""; \
|
||||
SUDO_FLAGS=""; \
|
||||
if [ "$(SSH_PASS)" = "true" ]; then \
|
||||
SSH_FLAGS="-k"; \
|
||||
fi; \
|
||||
if [ "$(SUDO)" = "true" ]; then \
|
||||
SUDO_FLAGS="-K"; \
|
||||
fi; \
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_DEV) --limit $(HOST) $(ANSIBLE_ARGS) $$SSH_FLAGS $$SUDO_FLAGS
|
||||
|
||||
# Data science role
|
||||
datascience: ## Install data science stack (usage: make datascience HOST=server01)
|
||||
@ -354,12 +379,21 @@ docker: ## Install/configure Docker only
|
||||
@echo "$(YELLOW)Running Docker setup...$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_DEV) --tags docker
|
||||
|
||||
shell: ## Configure shell only
|
||||
@echo "$(YELLOW)Running shell configuration...$(RESET)"
|
||||
shell: ## Configure shell (usage: make shell [HOST=dev02] [SUDO=true])
|
||||
ifdef HOST
|
||||
@echo "$(YELLOW)Running shell configuration on host: $(HOST)$(RESET)"
|
||||
@if [ "$(SUDO)" = "true" ]; then \
|
||||
$(ANSIBLE_PLAYBOOK) playbooks/shell.yml --limit $(HOST) $(ANSIBLE_ARGS) -K; \
|
||||
else \
|
||||
$(ANSIBLE_PLAYBOOK) playbooks/shell.yml --limit $(HOST) $(ANSIBLE_ARGS); \
|
||||
fi
|
||||
else
|
||||
@echo "$(YELLOW)Running shell configuration on all dev hosts...$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_DEV) --tags shell
|
||||
endif
|
||||
|
||||
shell-all: ## Configure shell on all shell_hosts (usage: make shell-all)
|
||||
@echo "$(YELLOW)Running shell configuration on all shell hosts...$(RESET)"
|
||||
shell-all: ## Configure shell on all hosts (usage: make shell-all)
|
||||
@echo "$(YELLOW)Running shell configuration on all hosts...$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) playbooks/shell.yml $(ANSIBLE_ARGS)
|
||||
|
||||
apps: ## Install applications only
|
||||
@ -528,6 +562,81 @@ monitoring: ## Install monitoring tools on all machines
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_DEV) --tags monitoring
|
||||
@echo "$(GREEN)✓ Monitoring installation complete$(RESET)"
|
||||
|
||||
proxmox-info: ## Show Proxmox VM/LXC info (usage: make proxmox-info [PROJECT=projectA] [ALL=true] [TYPE=lxc|qemu|all])
|
||||
@echo "$(YELLOW)Querying Proxmox guest info...$(RESET)"
|
||||
@EXTRA=""; \
|
||||
if [ -n "$(PROJECT)" ]; then EXTRA="$$EXTRA -e app_project=$(PROJECT)"; fi; \
|
||||
if [ "$(ALL)" = "true" ]; then EXTRA="$$EXTRA -e proxmox_info_all=true"; fi; \
|
||||
if [ -n "$(TYPE)" ]; then EXTRA="$$EXTRA -e proxmox_info_type=$(TYPE)"; fi; \
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_PROXMOX_INFO) $$EXTRA
|
||||
|
||||
app-provision: ## Provision app project containers/VMs on Proxmox (usage: make app-provision PROJECT=projectA)
|
||||
ifndef PROJECT
|
||||
@echo "$(RED)Error: PROJECT parameter required$(RESET)"
|
||||
@echo "Usage: make app-provision PROJECT=projectA"
|
||||
@exit 1
|
||||
endif
|
||||
@echo "$(YELLOW)Provisioning app project guests on Proxmox: $(PROJECT)$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) playbooks/app/provision_vms.yml -e app_project=$(PROJECT)
|
||||
|
||||
app-configure: ## Configure OS + app on project guests (usage: make app-configure PROJECT=projectA)
|
||||
ifndef PROJECT
|
||||
@echo "$(RED)Error: PROJECT parameter required$(RESET)"
|
||||
@echo "Usage: make app-configure PROJECT=projectA"
|
||||
@exit 1
|
||||
endif
|
||||
@echo "$(YELLOW)Configuring app project guests: $(PROJECT)$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) playbooks/app/configure_app.yml -e app_project=$(PROJECT)
|
||||
|
||||
app: ## Provision + configure app project (usage: make app PROJECT=projectA)
|
||||
ifndef PROJECT
|
||||
@echo "$(RED)Error: PROJECT parameter required$(RESET)"
|
||||
@echo "Usage: make app PROJECT=projectA"
|
||||
@exit 1
|
||||
endif
|
||||
@echo "$(YELLOW)Provisioning + configuring app project: $(PROJECT)$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) playbooks/app/site.yml -e app_project=$(PROJECT)
|
||||
|
||||
# Timeshift snapshot and rollback
|
||||
timeshift-snapshot: ## Create Timeshift snapshot (usage: make timeshift-snapshot HOST=dev02)
|
||||
ifndef HOST
|
||||
@echo "$(RED)Error: HOST parameter required$(RESET)"
|
||||
@echo "Usage: make timeshift-snapshot HOST=dev02"
|
||||
@exit 1
|
||||
endif
|
||||
@echo "$(YELLOW)Creating Timeshift snapshot on $(HOST)...$(RESET)"
|
||||
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_DEV) --limit $(HOST) --tags timeshift,snapshot
|
||||
@echo "$(GREEN)✓ Snapshot created$(RESET)"
|
||||
|
||||
timeshift-list: ## List Timeshift snapshots (usage: make timeshift-list HOST=dev02)
|
||||
ifndef HOST
|
||||
@echo "$(RED)Error: HOST parameter required$(RESET)"
|
||||
@echo "Usage: make timeshift-list HOST=dev02"
|
||||
@exit 1
|
||||
endif
|
||||
@echo "$(YELLOW)Listing Timeshift snapshots on $(HOST)...$(RESET)"
|
||||
@$(ANSIBLE_PLAYBOOK) playbooks/timeshift.yml --limit $(HOST) -e "timeshift_action=list" $(ANSIBLE_ARGS)
|
||||
|
||||
timeshift-restore: ## Restore from Timeshift snapshot (usage: make timeshift-restore HOST=dev02 SNAPSHOT=2025-12-17_21-30-00)
|
||||
ifndef HOST
|
||||
@echo "$(RED)Error: HOST parameter required$(RESET)"
|
||||
@echo "Usage: make timeshift-restore HOST=dev02 SNAPSHOT=2025-12-17_21-30-00"
|
||||
@exit 1
|
||||
endif
|
||||
ifndef SNAPSHOT
|
||||
@echo "$(RED)Error: SNAPSHOT parameter required$(RESET)"
|
||||
@echo "Usage: make timeshift-restore HOST=dev02 SNAPSHOT=2025-12-17_21-30-00"
|
||||
@echo "$(YELLOW)Available snapshots:$(RESET)"
|
||||
@$(MAKE) timeshift-list HOST=$(HOST)
|
||||
@exit 1
|
||||
endif
|
||||
@echo "$(RED)WARNING: This will restore the system to snapshot $(SNAPSHOT)$(RESET)"
|
||||
@echo "$(YELLOW)This action cannot be undone. Continue? [y/N]$(RESET)"
|
||||
@read -r confirm && [ "$$confirm" = "y" ] || exit 1
|
||||
@echo "$(YELLOW)Restoring snapshot $(SNAPSHOT) on $(HOST)...$(RESET)"
|
||||
@$(ANSIBLE_PLAYBOOK) playbooks/timeshift.yml --limit $(HOST) -e "timeshift_action=restore timeshift_snapshot=$(SNAPSHOT)" $(ANSIBLE_ARGS)
|
||||
@echo "$(GREEN)✓ Snapshot restored$(RESET)"
|
||||
|
||||
test-connectivity: ## Test host connectivity with detailed diagnostics and recommendations
|
||||
@echo "$(YELLOW)Testing host connectivity...$(RESET)"
|
||||
@if [ -f "test_connectivity.py" ]; then \
|
||||
|
||||
218
README.md
218
README.md
@ -1,178 +1,80 @@
|
||||
# Ansible Infrastructure Management
|
||||
|
||||
Comprehensive infrastructure automation for development environments, server management, and VM provisioning.
|
||||
Ansible automation for development machines, service hosts, and **Proxmox-managed guests** (LXC-first, with a path for KVM VMs).
|
||||
|
||||
## 📊 **Current Status**
|
||||
|
||||
### ✅ **Completed Infrastructure**
|
||||
- **Core System**: Base packages, SSH hardening, user management
|
||||
- **Development Environment**: Git, Node.js, Python, Docker, modern CLI tools
|
||||
- **Shell Configuration**: Zsh + Oh My Zsh + Powerlevel10k + plugins
|
||||
- **Applications**: VS Code, Cursor, Brave, LibreOffice, desktop tools
|
||||
- **Monitoring**: System monitoring tools + custom scripts (`sysinfo`, `netinfo`)
|
||||
- **VPN Mesh**: Tailscale integration with automated auth keys
|
||||
- **Security**: UFW firewall, fail2ban, SSH hardening
|
||||
- **Maintenance**: Automated package updates and system cleanup
|
||||
|
||||
### 🎯 **Next Priorities**
|
||||
1. **Enhanced monitoring**: Grafana + Prometheus dashboard
|
||||
2. **Security hardening**: ClamAV antivirus, Lynis auditing, vulnerability scanning
|
||||
3. **Centralized logging**: ELK stack for log aggregation
|
||||
4. **CI/CD pipeline**: GitLab Runner or Jenkins integration
|
||||
5. **Advanced security**: Intrusion detection, automated patching
|
||||
|
||||
## 🚀 Quick Start
|
||||
## Quick start
|
||||
|
||||
```bash
|
||||
# Install dependencies
|
||||
# Install Python deps + Ansible collections
|
||||
make bootstrap
|
||||
|
||||
# Set up secrets management
|
||||
make create-vault
|
||||
# Edit secrets (Proxmox credentials, SSH public key, etc.)
|
||||
make edit-group-vault
|
||||
|
||||
# Test configuration (comprehensive)
|
||||
make test
|
||||
|
||||
# Deploy to all hosts (dry run first)
|
||||
make check
|
||||
make apply
|
||||
# Validate the repo
|
||||
make test-syntax
|
||||
```
|
||||
|
||||
## 📚 Documentation
|
||||
## Proxmox app projects (LXC-first)
|
||||
|
||||
### Getting Started
|
||||
- [**Initial Setup Guide**](docs/guides/setup.md) - First-time setup instructions
|
||||
- [**Ansible Vault Guide**](docs/guides/vault.md) - Managing secrets securely
|
||||
- [**Tailscale VPN Setup**](docs/guides/tailscale.md) - Mesh networking configuration
|
||||
This repo can provision and configure **dev/qa/prod guests per application project** using the `app_projects` model.
|
||||
|
||||
### Reference
|
||||
- [**Installed Applications**](docs/reference/applications.md) - Complete software inventory
|
||||
- [**Makefile Commands**](docs/reference/makefile.md) - All available make targets
|
||||
- [**Architecture Overview**](docs/reference/architecture.md) - System design and structure
|
||||
- **Configure projects**: `inventories/production/group_vars/all/main.yml` (`app_projects`)
|
||||
- **Configure secrets**: `inventories/production/group_vars/all/vault.yml` (encrypted)
|
||||
- **Run end-to-end**:
|
||||
|
||||
## 🏗️ Project Structure
|
||||
```bash
|
||||
make app PROJECT=projectA
|
||||
```
|
||||
|
||||
Other useful entry points:
|
||||
|
||||
- **Provision only**: `make app-provision PROJECT=projectA`
|
||||
- **Configure only**: `make app-configure PROJECT=projectA`
|
||||
- **Info / safety**: `make proxmox-info [PROJECT=projectA] [ALL=true] [TYPE=lxc|qemu|all]`
|
||||
|
||||
Safety notes:
|
||||
|
||||
- **IP conflict precheck**: provisioning fails if the target IP responds
|
||||
(override with `-e allow_ip_conflicts=true` only if you really mean it).
|
||||
- **VMID/CTID collision guardrail**: provisioning fails if the VMID exists but the guest name doesn't match
|
||||
(override with `-e allow_vmid_collision=true` only if you really mean it).
|
||||
- **No destructive playbooks**: this repo intentionally does **not** ship “destroy/decommission” automation.
|
||||
|
||||
Docs:
|
||||
|
||||
- `docs/guides/app_stack_proxmox.md`
|
||||
- `docs/guides/app_stack_execution_flow.md`
|
||||
|
||||
## Project structure (relevant paths)
|
||||
|
||||
```
|
||||
ansible/
|
||||
├── Makefile # Task automation
|
||||
├── ansible.cfg # Ansible configuration
|
||||
├── hosts # Inventory file
|
||||
├── collections/
|
||||
│ └── requirements.yml # Galaxy dependencies
|
||||
├── group_vars/ # Global variables
|
||||
│ ├── all.yml
|
||||
│ └── all/vault.yml # Encrypted secrets
|
||||
├── host_vars/ # Host-specific configs
|
||||
├── roles/ # Ansible roles
|
||||
│ ├── base/ # Core system setup
|
||||
│ ├── development/ # Dev tools
|
||||
│ ├── docker/ # Container platform
|
||||
│ ├── monitoring/ # System monitoring
|
||||
│ ├── tailscale/ # VPN networking
|
||||
│ └── ... # Additional roles
|
||||
├── Makefile
|
||||
├── ansible.cfg
|
||||
├── collections/requirements.yml
|
||||
├── inventories/production/
|
||||
│ ├── hosts
|
||||
│ ├── group_vars/all/
|
||||
│ │ ├── main.yml
|
||||
│ │ ├── vault.yml
|
||||
│ │ └── vault.example.yml
|
||||
│ └── host_vars/
|
||||
├── playbooks/
|
||||
│ ├── dev-playbook.yml # Development setup
|
||||
│ ├── local-playbook.yml # Local machine
|
||||
│ ├── maintenance-playbook.yml
|
||||
│ └── tailscale-playbook.yml
|
||||
└── docs/ # Documentation
|
||||
├── guides/ # How-to guides
|
||||
└── reference/ # Technical reference
|
||||
│ ├── app/
|
||||
│ │ ├── site.yml
|
||||
│ │ ├── provision_vms.yml
|
||||
│ │ ├── configure_app.yml
|
||||
│ │ └── proxmox_info.yml
|
||||
│ └── site.yml
|
||||
└── roles/
|
||||
├── proxmox_vm/
|
||||
├── base_os/
|
||||
├── app_setup/
|
||||
└── pote/
|
||||
```
|
||||
|
||||
## 🎯 Key Features
|
||||
## Documentation
|
||||
|
||||
### Infrastructure Management
|
||||
- **Automated Provisioning**: Proxmox VM creation and configuration
|
||||
- **Configuration Management**: Consistent setup across all machines
|
||||
- **Network Security**: Tailscale VPN mesh networking
|
||||
- **System Maintenance**: Automated updates and cleanup
|
||||
|
||||
### Development Environment
|
||||
- **Shell Environment**: Zsh + Oh My Zsh + Powerlevel10k
|
||||
- **Container Platform**: Docker CE with Compose
|
||||
- **Development Tools**: Node.js, Python, Git, build tools
|
||||
- **Code Editors**: VS Code, Cursor IDE
|
||||
|
||||
### Security & Monitoring
|
||||
- **SSH Hardening**: Modern crypto, key-only auth, fail2ban
|
||||
- **Firewall**: UFW with sensible defaults
|
||||
- **Monitoring Tools**: btop, iotop, nethogs, custom dashboards
|
||||
|
||||
## 🧪 Testing & Validation
|
||||
|
||||
### Comprehensive Testing
|
||||
```bash
|
||||
make test # Full test suite (lint + syntax + validation)
|
||||
make test-syntax # Syntax and configuration validation only
|
||||
make lint # Ansible-lint only
|
||||
```
|
||||
|
||||
### Testing Coverage
|
||||
- **Playbook syntax**: All main playbooks and infrastructure playbooks
|
||||
- **Role validation**: All role test playbooks
|
||||
- **Configuration files**: YAML and INI file validation
|
||||
- **Documentation**: Markdown syntax and link checking (installed via `make bootstrap`)
|
||||
- **Linting**: Full Ansible best practices validation
|
||||
|
||||
## 🖥️ Managed Hosts
|
||||
|
||||
| Host | Type | OS | Purpose |
|
||||
|------|------|-----|---------|
|
||||
| dev01 | Physical | Debian | Primary development |
|
||||
| bottom | Physical | Debian | Secondary development |
|
||||
| debianDesktopVM | VM | Debian | Desktop environment |
|
||||
| giteaVM | VM | Alpine | Git repository hosting |
|
||||
| portainerVM | VM | Alpine | Container management |
|
||||
| homepageVM | VM | Debian | Service dashboard |
|
||||
|
||||
## 🔧 Common Tasks
|
||||
|
||||
```bash
|
||||
# System Maintenance
|
||||
make maintenance # Update all systems
|
||||
make maintenance HOST=dev01 # Update specific host
|
||||
|
||||
# Development Setup
|
||||
make docker # Install Docker
|
||||
make shell # Configure shell
|
||||
make apps # Install applications
|
||||
|
||||
# Network & Security
|
||||
make tailscale # Deploy VPN
|
||||
make security # Security hardening
|
||||
make monitoring # Deploy monitoring
|
||||
|
||||
# Infrastructure
|
||||
make create-vm # Create new VM
|
||||
make status # Check connectivity
|
||||
make facts # Gather system info
|
||||
```
|
||||
|
||||
## 🛠️ Requirements
|
||||
|
||||
### Control Machine (where you run Ansible)
|
||||
- Python 3.x with `pipx` (recommended) or `pip3`
|
||||
- Node.js and `npm` (for documentation testing)
|
||||
- SSH access to target hosts
|
||||
- Ansible Vault password (for secrets)
|
||||
|
||||
### Target Hosts
|
||||
- SSH server running
|
||||
- Python 3.x
|
||||
- `sudo` access for the Ansible user
|
||||
|
||||
### Dependency Management
|
||||
All project dependencies are managed through standard requirements files:
|
||||
- **`requirements.txt`** - Python packages (ansible, ansible-lint, etc.)
|
||||
- **`package.json`** - Node.js packages (markdown tools)
|
||||
- **`collections/requirements.yml`** - Ansible collections
|
||||
|
||||
**Setup**: Run `make bootstrap` to install all dependencies automatically.
|
||||
|
||||
## 📝 Contributing
|
||||
|
||||
1. Test changes with `make check` (dry run)
|
||||
2. Follow existing patterns and naming conventions
|
||||
3. Update documentation for new features
|
||||
4. Encrypt sensitive data with Ansible Vault
|
||||
- **Guides**: `docs/guides/`
|
||||
- **Reference**: `docs/reference/`
|
||||
@ -1,4 +1,6 @@
|
||||
---
|
||||
# Collections required for this repo.
|
||||
# Install with: ansible-galaxy collection install -r collections/requirements.yml
|
||||
collections:
|
||||
- name: community.general
|
||||
version: ">=6.0.0"
|
||||
|
||||
9
configure_app.yml
Normal file
9
configure_app.yml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
# Wrapper playbook
|
||||
# Purpose:
|
||||
# ansible-playbook -i inventories/production configure_app.yml -e app_project=projectA
|
||||
|
||||
- name: Configure app project guests
|
||||
import_playbook: playbooks/app/configure_app.yml
|
||||
|
||||
|
||||
157
docs/ROADMAP.md
Normal file
157
docs/ROADMAP.md
Normal file
@ -0,0 +1,157 @@
|
||||
# Project Roadmap & Future Improvements
|
||||
|
||||
Ideas and plans for enhancing the Ansible infrastructure.
|
||||
|
||||
## 🚀 Quick Wins (< 30 minutes each)
|
||||
|
||||
### Monitoring Enhancements
|
||||
- [ ] Add Grafana + Prometheus for service monitoring dashboard
|
||||
- [ ] Implement health check scripts for critical services
|
||||
- [ ] Create custom Ansible callback plugin for better output
|
||||
|
||||
### Security Improvements
|
||||
- [ ] Add ClamAV antivirus scanning
|
||||
- [ ] Implement Lynis security auditing
|
||||
- [ ] Set up automatic security updates with unattended-upgrades
|
||||
- [ ] Add SSH key rotation mechanism
|
||||
- [ ] Implement connection monitoring and alerting
|
||||
|
||||
|
||||
## 📊 Medium Projects (1-2 hours each)
|
||||
|
||||
### Infrastructure Services
|
||||
- [ ] **Centralized Logging**: Deploy ELK stack (Elasticsearch, Logstash, Kibana)
|
||||
- [ ] **Container Orchestration**: Implement Docker Swarm or K3s
|
||||
- [ ] **CI/CD Pipeline**: Set up GitLab Runner or Jenkins
|
||||
- [ ] **Network Storage**: Configure NFS or Samba shares
|
||||
- [ ] **DNS Server**: Deploy Pi-hole for ad blocking and local DNS
|
||||
|
||||
### New Service VMs
|
||||
- [ ] **Monitoring VM**: Dedicated Prometheus + Grafana instance
|
||||
- [ ] **Media VM**: Plex/Jellyfin media server
|
||||
- [ ] **Security VM**: Security scanning and vulnerability monitoring
|
||||
- [ ] **Database VM**: PostgreSQL/MySQL for application data
|
||||
|
||||
## 🎯 Service-Specific Enhancements
|
||||
|
||||
### giteaVM (Alpine)
|
||||
Current: Git repository hosting ✅
|
||||
- [ ] Add CI/CD runners
|
||||
- [ ] Implement package registry
|
||||
- [ ] Set up webhook integrations
|
||||
- [ ] Add code review tools
|
||||
|
||||
### portainerVM (Alpine)
|
||||
Current: Container management ✅
|
||||
- [ ] Deploy Docker registry
|
||||
- [ ] Add image vulnerability scanning
|
||||
- [ ] Set up container monitoring
|
||||
|
||||
### homepageVM (Debian)
|
||||
Current: Service dashboard ✅
|
||||
- [ ] Add uptime monitoring (Uptime Kuma)
|
||||
- [ ] Create public status page
|
||||
- [ ] Implement service dependency mapping
|
||||
- [ ] Add performance metrics display
|
||||
|
||||
### Development VMs
|
||||
Current: Development environment ✅
|
||||
- [ ] Add code quality tools (SonarQube)
|
||||
- [ ] Deploy testing environments
|
||||
- [ ] Implement development databases
|
||||
- [ ] Set up local package caching (Artifactory/Nexus)
|
||||
|
||||
## 🔧 Ansible Improvements
|
||||
|
||||
### Role Enhancements
|
||||
- [ ] Create reusable database role (PostgreSQL, MySQL, Redis)
|
||||
- [ ] Develop monitoring role with multiple backends
|
||||
- [ ] Build certificate management role (Let's Encrypt)
|
||||
- [ ] Create reverse proxy role (nginx/traefik)
|
||||
|
||||
### Playbook Optimization
|
||||
- [ ] Implement dynamic inventory from cloud providers
|
||||
- [ ] Add parallel execution strategies
|
||||
- [ ] Create rollback mechanisms
|
||||
- [ ] Implement blue-green deployment patterns
|
||||
|
||||
### Testing & Quality
|
||||
- [ ] Add Molecule tests for all roles
|
||||
- [ ] Implement GitHub Actions CI/CD
|
||||
- [ ] Create integration test suite
|
||||
- [ ] Add performance benchmarking
|
||||
|
||||
## 📈 Long-term Goals
|
||||
|
||||
### High Availability
|
||||
- [ ] Implement cluster management for critical services
|
||||
- [ ] Set up load balancing
|
||||
- [ ] Create disaster recovery procedures
|
||||
- [ ] Implement automated failover
|
||||
|
||||
### Observability
|
||||
- [ ] Full APM (Application Performance Monitoring)
|
||||
- [ ] Distributed tracing
|
||||
- [ ] Log aggregation and analysis
|
||||
- [ ] Custom metrics and dashboards
|
||||
|
||||
### Automation
|
||||
- [ ] GitOps workflow implementation
|
||||
- [ ] Self-healing infrastructure
|
||||
- [ ] Automated scaling
|
||||
- [ ] Predictive maintenance
|
||||
|
||||
## 📝 Documentation Improvements
|
||||
|
||||
- [ ] Create video tutorials
|
||||
- [ ] Add architecture diagrams
|
||||
- [ ] Write troubleshooting guides
|
||||
- [ ] Create role development guide
|
||||
- [ ] Add contribution guidelines
|
||||
|
||||
## Priority Matrix
|
||||
|
||||
### ✅ **COMPLETED (This Week)**
|
||||
1. ~~Fix any existing shell issues~~ - Shell configuration working
|
||||
2. ~~Complete vault setup with all secrets~~ - Tailscale auth key in vault
|
||||
3. ~~Deploy monitoring basics~~ - System monitoring deployed
|
||||
4. ~~Fix Tailscale handler issues~~ - Case-sensitive handlers fixed
|
||||
|
||||
### 🎯 **IMMEDIATE (Next)**
|
||||
1. **Antivirus protection** - ClamAV deployment with scheduled scanning
|
||||
2. **Enhanced monitoring** - Add Grafana + Prometheus
|
||||
3. **Security auditing** - Lynis audits and vulnerability scanning
|
||||
4. **SSH key management** - Fix remaining connectivity issues
|
||||
|
||||
### Short-term (This Month)
|
||||
1. Centralized logging
|
||||
2. Enhanced monitoring
|
||||
3. Security auditing
|
||||
4. Advanced security monitoring
|
||||
|
||||
### Medium-term (Quarter)
|
||||
1. CI/CD pipeline
|
||||
2. Container orchestration
|
||||
3. Service mesh
|
||||
4. Advanced monitoring
|
||||
|
||||
### Long-term (Year)
|
||||
1. Full HA implementation
|
||||
2. Multi-region support
|
||||
3. Complete observability
|
||||
4. Full automation
|
||||
|
||||
## Contributing
|
||||
|
||||
To add new ideas:
|
||||
1. Create an issue in the repository
|
||||
2. Label with `enhancement` or `feature`
|
||||
3. Discuss in team meetings
|
||||
4. Update this roadmap when approved
|
||||
|
||||
## Notes
|
||||
|
||||
- Focus on stability over features
|
||||
- Security and monitoring are top priorities
|
||||
- All changes should be tested in dev first
|
||||
- Document everything as you go
|
||||
205
docs/SECURITY_HARDENING_PLAN.md
Normal file
205
docs/SECURITY_HARDENING_PLAN.md
Normal file
@ -0,0 +1,205 @@
|
||||
# Security Hardening Implementation Plan
|
||||
|
||||
## 🔒 **Security Hardening Role Structure**
|
||||
|
||||
### **Phase 1: Antivirus Protection (ClamAV)**
|
||||
|
||||
**What gets installed:**
|
||||
```bash
|
||||
- clamav-daemon # Background scanning service
|
||||
- clamav-freshclam # Virus definition updates
|
||||
- clamav-milter # Email integration
|
||||
- clamdscan # Command-line scanner
|
||||
```
|
||||
|
||||
**What gets configured:**
|
||||
- **Daily scans** at 3 AM of critical directories
|
||||
- **Real-time monitoring** of `/home`, `/var/www`, `/tmp`
|
||||
- **Automatic updates** of virus definitions
|
||||
- **Email alerts** for detected threats
|
||||
- **Quarantine system** for suspicious files
|
||||
|
||||
**Ansible tasks:**
|
||||
```yaml
|
||||
- name: Install ClamAV
|
||||
apt:
|
||||
name: [clamav-daemon, clamav-freshclam, clamdscan]
|
||||
state: present
|
||||
|
||||
- name: Configure daily scans
|
||||
cron:
|
||||
name: "Daily ClamAV scan"
|
||||
job: "/usr/bin/clamscan -r /home /var/www --log=/var/log/clamav/daily.log"
|
||||
hour: "3"
|
||||
minute: "0"
|
||||
|
||||
- name: Enable real-time scanning
|
||||
systemd:
|
||||
name: clamav-daemon
|
||||
enabled: true
|
||||
state: started
|
||||
```
|
||||
|
||||
### **Phase 2: Security Auditing (Lynis)**
|
||||
|
||||
**What gets installed:**
|
||||
```bash
|
||||
- lynis # Security auditing tool
|
||||
- rkhunter # Rootkit hunter
|
||||
- chkrootkit # Additional rootkit detection
|
||||
```
|
||||
|
||||
**What gets configured:**
|
||||
- **Weekly security audits** with detailed reports
|
||||
- **Baseline security scoring** for comparison
|
||||
- **Automated hardening** of common issues
|
||||
- **Email reports** to administrators
|
||||
- **Trend tracking** of security improvements
|
||||
|
||||
**Ansible tasks:**
|
||||
```yaml
|
||||
- name: Install Lynis
|
||||
get_url:
|
||||
url: "https://downloads.cisofy.com/lynis/lynis-3.0.8.tar.gz"
|
||||
dest: "/tmp/lynis.tar.gz"
|
||||
|
||||
- name: Extract and install Lynis
|
||||
unarchive:
|
||||
src: "/tmp/lynis.tar.gz"
|
||||
dest: "/opt/"
|
||||
remote_src: true
|
||||
|
||||
- name: Create weekly audit cron
|
||||
cron:
|
||||
name: "Weekly Lynis audit"
|
||||
job: "/opt/lynis/lynis audit system --quick --report-file /var/log/lynis/weekly-$(date +\\%Y\\%m\\%d).log"
|
||||
weekday: "0"
|
||||
hour: "2"
|
||||
minute: "0"
|
||||
```
|
||||
|
||||
### **Phase 3: Advanced Security Measures**
|
||||
|
||||
#### **File Integrity Monitoring (AIDE)**
|
||||
```yaml
|
||||
# Monitors critical system files for changes
|
||||
- Tracks modifications to /etc, /bin, /sbin, /usr/bin
|
||||
- Alerts on unauthorized changes
|
||||
- Creates cryptographic checksums
|
||||
- Daily integrity checks
|
||||
```
|
||||
|
||||
#### **Intrusion Detection (Fail2ban Enhancement)**
|
||||
```yaml
|
||||
# Already have basic fail2ban, enhance with:
|
||||
- SSH brute force protection ✅ (already done)
|
||||
- Web application attack detection
|
||||
- Port scan detection
|
||||
- DDoS protection rules
|
||||
- Geographic IP blocking
|
||||
```
|
||||
|
||||
#### **System Hardening**
|
||||
```yaml
|
||||
# Kernel security parameters
|
||||
- Disable unused network protocols
|
||||
- Enable ASLR (Address Space Layout Randomization)
|
||||
- Configure secure memory settings
|
||||
- Harden network stack parameters
|
||||
|
||||
# Service hardening
|
||||
- Disable unnecessary services
|
||||
- Secure service configurations
|
||||
- Implement principle of least privilege
|
||||
- Configure secure file permissions
|
||||
```
|
||||
|
||||
## 🎯 **Implementation Strategy**
|
||||
|
||||
### **Week 1: Basic Antivirus**
|
||||
```bash
|
||||
# Create security role
|
||||
mkdir -p roles/security/{tasks,templates,handlers,defaults}
|
||||
|
||||
# Implement ClamAV
|
||||
- Install and configure ClamAV
|
||||
- Set up daily scans
|
||||
- Configure email alerts
|
||||
- Test malware detection
|
||||
```
|
||||
|
||||
### **Week 2: Security Auditing**
|
||||
```bash
|
||||
# Add Lynis auditing
|
||||
- Install Lynis security scanner
|
||||
- Configure weekly audits
|
||||
- Create reporting dashboard
|
||||
- Baseline current security score
|
||||
```
|
||||
|
||||
### **Week 3: Advanced Hardening**
|
||||
```bash
|
||||
# Implement AIDE and enhanced fail2ban
|
||||
- File integrity monitoring
|
||||
- Enhanced intrusion detection
|
||||
- System parameter hardening
|
||||
- Security policy enforcement
|
||||
```
|
||||
|
||||
## 📊 **Expected Benefits**
|
||||
|
||||
### **Immediate (Week 1)**
|
||||
- ✅ **Malware protection** on all systems
|
||||
- ✅ **Automated threat detection**
|
||||
- ✅ **Real-time file monitoring**
|
||||
|
||||
### **Short-term (Month 1)**
|
||||
- ✅ **Security baseline** established
|
||||
- ✅ **Vulnerability identification**
|
||||
- ✅ **Automated hardening** applied
|
||||
- ✅ **Security trend tracking**
|
||||
|
||||
### **Long-term (Ongoing)**
|
||||
- ✅ **Proactive threat detection**
|
||||
- ✅ **Compliance reporting**
|
||||
- ✅ **Reduced attack surface**
|
||||
- ✅ **Security incident prevention**
|
||||
|
||||
## 🚨 **Security Alerts & Monitoring**
|
||||
|
||||
### **Alert Types:**
|
||||
1. **Critical**: Malware detected, system compromise
|
||||
2. **High**: Failed security audit, integrity violation
|
||||
3. **Medium**: Suspicious activity, configuration drift
|
||||
4. **Low**: Routine scan results, update notifications
|
||||
|
||||
### **Notification Methods:**
|
||||
- **Email alerts** for critical/high priority
|
||||
- **Log aggregation** in centralized system
|
||||
- **Dashboard indicators** in monitoring system
|
||||
- **Weekly reports** with security trends
|
||||
|
||||
## 🔧 **Integration with Existing Infrastructure**
|
||||
|
||||
### **Works with your current setup:**
|
||||
- ✅ **Fail2ban** - Enhanced with more rules
|
||||
- ✅ **UFW firewall** - Additional hardening rules
|
||||
- ✅ **SSH hardening** - Extended with key rotation
|
||||
- ✅ **Monitoring** - Security metrics integration
|
||||
- ✅ **Maintenance** - Security updates automation
|
||||
|
||||
### **Complements Proxmox + NAS:**
|
||||
- **File-level protection** vs. VM snapshots
|
||||
- **Real-time detection** vs. snapshot recovery
|
||||
- **Proactive prevention** vs. reactive restoration
|
||||
- **Security compliance** vs. data protection
|
||||
|
||||
## 📋 **Next Steps**
|
||||
|
||||
1. **Create security role** structure
|
||||
2. **Implement ClamAV** antivirus protection
|
||||
3. **Add Lynis** security auditing
|
||||
4. **Configure monitoring** integration
|
||||
5. **Test and validate** security improvements
|
||||
|
||||
Implementation begins with the security role structure and proceeds through the three phases outlined above.
|
||||
173
docs/guides/app_stack_execution_flow.md
Normal file
173
docs/guides/app_stack_execution_flow.md
Normal file
@ -0,0 +1,173 @@
|
||||
# App stack execution flow (what happens when you run it)
|
||||
|
||||
This document describes **exactly** what Ansible runs and what it changes when you execute the Proxmox app stack playbooks.
|
||||
|
||||
## Entry points
|
||||
|
||||
- Recommended end-to-end run:
|
||||
- `playbooks/app/site.yml`
|
||||
- Repo-root wrappers (equivalent):
|
||||
- `site.yml` (imports `playbooks/site.yml`, and you can `--tags app`)
|
||||
- `provision_vms.yml` (imports `playbooks/app/provision_vms.yml`)
|
||||
- `configure_app.yml` (imports `playbooks/app/configure_app.yml`)
|
||||
|
||||
## High-level flow
|
||||
|
||||
When you run `playbooks/app/site.yml`, it imports two playbooks in order:
|
||||
|
||||
1. `playbooks/app/provision_vms.yml` (**Proxmox API changes happen here**)
|
||||
2. `playbooks/app/configure_app.yml` (**SSH into guests and configure OS/app**)
|
||||
|
||||
## Variables that drive everything
|
||||
|
||||
All per-project/per-env inputs come from:
|
||||
|
||||
- `inventories/production/group_vars/all/main.yml` → `app_projects`
|
||||
|
||||
Each `app_projects.<project>.envs.<env>` contains:
|
||||
|
||||
- `name` (container hostname / inventory host name)
|
||||
- `vmid` (Proxmox CTID)
|
||||
- `ip` (static IP in CIDR form, e.g. `10.0.10.101/24`)
|
||||
- `gateway` (e.g. `10.0.10.1`)
|
||||
- `branch` (`dev`, `qa`, `main`)
|
||||
- `env_vars` (key/value map written to `/srv/app/.env.<env>`)
|
||||
|
||||
Proxmox connection variables are also read from `inventories/production/group_vars/all/main.yml` but are usually vault-backed:
|
||||
|
||||
- `proxmox_host: "{{ vault_proxmox_host }}"`
|
||||
- `proxmox_user: "{{ vault_proxmox_user }}"`
|
||||
- `proxmox_node: "{{ vault_proxmox_node | default('pve') }}"`
|
||||
|
||||
## Phase 1: Provisioning via Proxmox API
|
||||
|
||||
### File chain
|
||||
|
||||
`playbooks/app/site.yml` imports `playbooks/app/provision_vms.yml`, which does:
|
||||
|
||||
- Validates `app_project` exists (if you passed one)
|
||||
- Loops projects → includes `playbooks/app/provision_one_guest.yml`
|
||||
- Loops envs inside the project → includes `playbooks/app/provision_one_env.yml`
|
||||
|
||||
### Preflight IP safety check
|
||||
|
||||
In `playbooks/app/provision_one_env.yml`:
|
||||
|
||||
- It runs `ping` against the target IP.
|
||||
- If the IP responds, the play **fails** to prevent accidental duplicate-IP provisioning.
|
||||
- You can override the guard (not recommended) with `-e allow_ip_conflicts=true`.
|
||||
|
||||
### What it creates/updates in Proxmox
|
||||
|
||||
In `playbooks/app/provision_one_env.yml` it calls role `roles/proxmox_vm` with LXC variables.
|
||||
|
||||
`roles/proxmox_vm/tasks/main.yml` dispatches:
|
||||
|
||||
- If `proxmox_guest_type == 'lxc'` → includes `roles/proxmox_vm/tasks/lxc.yml`
|
||||
|
||||
`roles/proxmox_vm/tasks/lxc.yml` performs:
|
||||
|
||||
1. **Build CT network config**
|
||||
- Produces a `netif` dict like:
|
||||
- `net0: name=eth0,bridge=vmbr0,firewall=1,ip=<CIDR>,gw=<GW>`
|
||||
|
||||
2. **Create/update the container**
|
||||
- Uses `community.proxmox.proxmox` with:
|
||||
- `state: present`
|
||||
- `update: true` (so re-runs reconcile config)
|
||||
- `vmid`, `hostname`, `ostemplate`, CPU/mem/swap, rootfs sizing, `netif`
|
||||
- `pubkey` and optionally `password` for initial root access
|
||||
|
||||
3. **Start the container**
|
||||
- Ensures `state: started` (if `lxc_start_after_create: true`)
|
||||
|
||||
4. **Wait for SSH**
|
||||
- `wait_for: host=<ip> port=22`
|
||||
|
||||
### Dynamic inventory creation
|
||||
|
||||
Still in `playbooks/app/provision_one_env.yml`, it calls `ansible.builtin.add_host` so the guests become available to later plays:
|
||||
|
||||
- Adds the guest to groups:
|
||||
- `app_all`
|
||||
- `app_<project>_all`
|
||||
- `app_<project>_<env>`
|
||||
- Sets:
|
||||
- `ansible_host` to the IP (without CIDR)
|
||||
- `ansible_user: root` (bootstrap user for first config)
|
||||
- `app_project`, `app_env` facts
|
||||
|
||||
## Phase 2: Configure OS + app on the guests
|
||||
|
||||
`playbooks/app/configure_app.yml` contains two plays:
|
||||
|
||||
### Play A: Build dynamic inventory (localhost)
|
||||
|
||||
This play exists so you can run `configure_app.yml` even if you didn’t run provisioning in the same Ansible invocation.
|
||||
|
||||
- It loops over projects/envs from `app_projects`
|
||||
- Adds hosts to:
|
||||
- `app_all`, `app_<project>_all`, `app_<project>_<env>`
|
||||
- Uses:
|
||||
- `ansible_user: "{{ app_bootstrap_user | default('root') }}"`
|
||||
|
||||
### Play B: Configure the hosts (SSH + sudo)
|
||||
|
||||
Targets:
|
||||
|
||||
- If you pass `-e app_project=projectA` → `hosts: app_projectA_all`
|
||||
- Otherwise → `hosts: app_all`
|
||||
|
||||
Tasks executed on each guest:
|
||||
|
||||
1. **Resolve effective project/env variables**
|
||||
- `project_def = app_projects[app_project]`
|
||||
- `env_def = app_projects[app_project].envs[app_env]`
|
||||
|
||||
2. **Role: `base_os`** (`roles/base_os/tasks/main.yml`)
|
||||
- Updates apt cache
|
||||
- Installs baseline packages (git/curl/nodejs/npm/ufw/etc.)
|
||||
- Creates `appuser` (passwordless sudo)
|
||||
- Adds your SSH public key to `appuser`
|
||||
- Enables UFW and allows:
|
||||
- SSH (22)
|
||||
- backend port (default `3001`, overridable per project)
|
||||
- frontend port (default `3000`, overridable per project)
|
||||
|
||||
3. **Role: `app_setup`** (`roles/app_setup/tasks/main.yml`)
|
||||
- Creates:
|
||||
- `/srv/app`
|
||||
- `/srv/app/backend`
|
||||
- `/srv/app/frontend`
|
||||
- Writes the env file:
|
||||
- `/srv/app/.env.<dev|qa|prod>` from template `roles/app_setup/templates/env.j2`
|
||||
- Writes the deploy script:
|
||||
- `/usr/local/bin/deploy_app.sh` from `roles/app_setup/templates/deploy_app.sh.j2`
|
||||
- Script does:
|
||||
- `git clone` if missing
|
||||
- `git checkout/pull` correct branch
|
||||
- runs backend install + migrations
|
||||
- runs frontend install + build
|
||||
- restarts systemd services
|
||||
- Writes systemd units:
|
||||
- `/etc/systemd/system/app-backend.service` from `app-backend.service.j2`
|
||||
- `/etc/systemd/system/app-frontend.service` from `app-frontend.service.j2`
|
||||
- Reloads systemd and enables/starts both services
|
||||
|
||||
## What changes on first run vs re-run
|
||||
|
||||
- **Provisioning**:
|
||||
- First run: creates CTs in Proxmox, sets static IP config, starts them.
|
||||
- Re-run: reconciles settings because `update: true` is used.
|
||||
- **Configuration**:
|
||||
- Mostly idempotent (directories/templates/users/firewall/services converge).
|
||||
|
||||
## Common “before you run” checklist
|
||||
|
||||
- Confirm `app_projects` has correct IPs/CTIDs/branches:
|
||||
- `inventories/production/group_vars/all/main.yml`
|
||||
- Ensure vault has Proxmox + SSH key material:
|
||||
- `inventories/production/group_vars/all/vault.yml`
|
||||
- Reference template: `inventories/production/group_vars/all/vault.example.yml`
|
||||
|
||||
|
||||
90
docs/guides/app_stack_proxmox.md
Normal file
90
docs/guides/app_stack_proxmox.md
Normal file
@ -0,0 +1,90 @@
|
||||
# Proxmox App Projects (LXC-first)
|
||||
|
||||
This guide documents the **modular app-project stack** that provisions Proxmox guests (dev/qa/prod) and configures a full-stack app layout on them.
|
||||
|
||||
## What you get
|
||||
|
||||
- Proxmox provisioning via API (currently **LXC**; VM support remains via existing `roles/proxmox_vm` KVM path)
|
||||
- A deployment user (`appuser`) with your SSH key
|
||||
- `/srv/app/backend` and `/srv/app/frontend`
|
||||
- Env file `/srv/app/.env.<dev|qa|prod>`
|
||||
- `/usr/local/bin/deploy_app.sh` to pull the right branch and restart services
|
||||
- systemd services:
|
||||
- `app-backend.service`
|
||||
- `app-frontend.service`
|
||||
|
||||
## Where to configure projects
|
||||
|
||||
Edit:
|
||||
|
||||
- `inventories/production/group_vars/all/main.yml`
|
||||
|
||||
Under `app_projects`, define projects like:
|
||||
|
||||
- `projectA.repo_url`
|
||||
- `projectA.envs.dev|qa|prod.ip/gateway/branch`
|
||||
- `projectA.guest_defaults` (cores/memory/rootfs sizing)
|
||||
- `projectA.deploy.*` (install/build/migrate/start commands)
|
||||
|
||||
Adding **projectB** is just adding another top-level `app_projects.projectB` entry.
|
||||
|
||||
## Proxmox credentials (vault)
|
||||
|
||||
This repo already expects Proxmox connection vars in vault (see existing Proxmox playbooks). Ensure these exist in:
|
||||
|
||||
- `inventories/production/group_vars/all/vault.yml` (encrypted)
|
||||
|
||||
Common patterns:
|
||||
|
||||
- `vault_proxmox_host`: `10.0.10.201`
|
||||
- `vault_proxmox_user`: e.g. `root@pam` or `ansible@pve`
|
||||
- `vault_proxmox_node`: e.g. `pve`
|
||||
- Either:
|
||||
- `vault_proxmox_password`, or
|
||||
- `vault_proxmox_token` + `vault_proxmox_token_id`
|
||||
|
||||
## Debian LXC template
|
||||
|
||||
The LXC provisioning uses `lxc_ostemplate`, defaulting to a Debian 12 template string like:
|
||||
|
||||
`local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst`
|
||||
|
||||
If your Proxmox has a different template filename, change `lxc_ostemplate` in `inventories/production/group_vars/all/main.yml`.
|
||||
|
||||
## Running it
|
||||
|
||||
Provision + configure one project:
|
||||
|
||||
```bash
|
||||
ansible-playbook -i inventories/production playbooks/app/site.yml -e app_project=projectA
|
||||
```
|
||||
|
||||
Provision + configure all projects in `app_projects`:
|
||||
|
||||
```bash
|
||||
ansible-playbook -i inventories/production playbooks/app/site.yml
|
||||
```
|
||||
|
||||
Only provisioning (Proxmox API):
|
||||
|
||||
```bash
|
||||
ansible-playbook -i inventories/production playbooks/app/provision_vms.yml -e app_project=projectA
|
||||
```
|
||||
|
||||
Only OS/app configuration:
|
||||
|
||||
```bash
|
||||
ansible-playbook -i inventories/production playbooks/app/configure_app.yml -e app_project=projectA
|
||||
```
|
||||
|
||||
## Optional: SSH aliases on your workstation
|
||||
|
||||
To write `~/.ssh/config` entries (disabled by default):
|
||||
|
||||
```bash
|
||||
ansible-playbook -i inventories/production playbooks/app/ssh_client_config.yml -e manage_ssh_config=true -e app_project=projectA
|
||||
```
|
||||
|
||||
This creates aliases like `projectA-dev`, `projectA-qa`, `projectA-prod`.
|
||||
|
||||
|
||||
@ -129,7 +129,7 @@ vault_ssh_public_key: "ssh-ed25519 AAAA..."
|
||||
## Step 7: Configure Variables
|
||||
|
||||
### Global Settings
|
||||
Edit `group_vars/all.yml`:
|
||||
Edit `inventories/production/group_vars/all/main.yml`:
|
||||
```yaml
|
||||
# Timezone and locale
|
||||
timezone: "America/New_York" # Your timezone
|
||||
@ -145,7 +145,7 @@ ssh_permit_root_login: "no"
|
||||
```
|
||||
|
||||
### Host-Specific Settings
|
||||
Create/edit `host_vars/hostname.yml` for host-specific configuration.
|
||||
Create/edit `inventories/production/host_vars/<hostname>.yml` for host-specific configuration.
|
||||
|
||||
## Step 8: Test Configuration
|
||||
|
||||
@ -159,7 +159,7 @@ make check
|
||||
make check HOST=dev01
|
||||
|
||||
# Check specific role
|
||||
ansible-playbook dev-playbook.yml --check --tags docker
|
||||
ansible-playbook playbooks/development.yml --check --tags docker
|
||||
```
|
||||
|
||||
## Step 9: Deploy
|
||||
@ -208,7 +208,7 @@ ansible dev -m shell -a "tailscale status"
|
||||
|
||||
### Vault Password Issues
|
||||
- Check vault password file exists and has correct permissions
|
||||
- Verify password is correct: `ansible-vault view group_vars/all/vault.yml`
|
||||
- Verify password is correct: `ansible-vault view inventories/production/group_vars/all/vault.yml`
|
||||
|
||||
### Python Not Found
|
||||
- Install Python on target: `sudo apt install python3`
|
||||
|
||||
@ -46,21 +46,21 @@ make tailscale-status
|
||||
make tailscale-dev
|
||||
|
||||
# Specific hosts
|
||||
ansible-playbook tailscale-playbook.yml --limit "dev01,bottom"
|
||||
ansible-playbook playbooks/tailscale.yml --limit "dev01,bottom"
|
||||
```
|
||||
|
||||
### Manual Installation
|
||||
```bash
|
||||
# With custom auth key (not recommended - use vault instead)
|
||||
ansible-playbook tailscale-playbook.yml -e "tailscale_auth_key=your-key"
|
||||
ansible-playbook playbooks/tailscale.yml -e "tailscale_auth_key=your-key"
|
||||
|
||||
# As part of existing playbooks
|
||||
ansible-playbook dev-playbook.yml --tags tailscale
|
||||
ansible-playbook playbooks/development.yml --tags tailscale
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Global Settings (`group_vars/all.yml`)
|
||||
### Global Settings (`inventories/production/group_vars/all/main.yml`)
|
||||
```yaml
|
||||
tailscale_auth_key: "{{ vault_tailscale_auth_key }}" # From vault
|
||||
tailscale_accept_routes: true # Accept subnet routes
|
||||
@ -68,7 +68,7 @@ tailscale_accept_dns: true # Accept DNS settings
|
||||
tailscale_ssh: true # Enable SSH over Tailscale
|
||||
```
|
||||
|
||||
### Host-Specific Settings (`host_vars/hostname.yml`)
|
||||
### Host-Specific Settings (`inventories/production/host_vars/<hostname>.yml`)
|
||||
```yaml
|
||||
tailscale_hostname: "custom-name" # Override hostname
|
||||
tailscale_advertise_routes: "192.168.1.0/24" # Share local subnet
|
||||
@ -100,7 +100,7 @@ sudo tailscale up
|
||||
|
||||
### Reset Connection
|
||||
```bash
|
||||
ansible-playbook tailscale-playbook.yml -e "tailscale_reset=true"
|
||||
ansible-playbook playbooks/tailscale.yml -e "tailscale_reset=true"
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
@ -119,7 +119,7 @@ The role automatically detects OS and uses appropriate package manager.
|
||||
## How It Works
|
||||
|
||||
1. **Playbook runs** → looks for `tailscale_auth_key`
|
||||
2. **Checks `all.yml`** → finds `{{ vault_tailscale_auth_key }}`
|
||||
2. **Checks inventory group vars** → finds `{{ vault_tailscale_auth_key }}`
|
||||
3. **Decrypts vault** → retrieves actual auth key
|
||||
4. **Installs Tailscale** → configures with your settings
|
||||
5. **Connects to network** → machine appears in admin console
|
||||
|
||||
211
docs/guides/timeshift.md
Normal file
211
docs/guides/timeshift.md
Normal file
@ -0,0 +1,211 @@
|
||||
# Timeshift Snapshot and Rollback Guide
|
||||
|
||||
## Overview
|
||||
|
||||
Timeshift is a system restore utility that creates snapshots of your system before Ansible playbook execution.
|
||||
This allows you to easily roll back if something goes wrong during configuration changes.
|
||||
|
||||
## How It Works
|
||||
|
||||
When you run a playbook, the Timeshift role automatically:
|
||||
1. Checks if Timeshift is installed (installs if missing)
|
||||
2. Creates a snapshot before making any changes
|
||||
3. Tags the snapshot with "ansible" and "pre-playbook" for easy identification
|
||||
|
||||
## Usage
|
||||
|
||||
### Automatic Snapshots
|
||||
|
||||
Snapshots are created automatically when running playbooks:
|
||||
|
||||
```bash
|
||||
# Run playbook - snapshot created automatically
|
||||
make dev HOST=dev02
|
||||
|
||||
# Or run only snapshot creation
|
||||
make timeshift-snapshot HOST=dev02
|
||||
```
|
||||
|
||||
### List Snapshots
|
||||
|
||||
```bash
|
||||
# List all snapshots on a host
|
||||
make timeshift-list HOST=dev02
|
||||
|
||||
# Or manually on the host
|
||||
ssh ladmin@192.168.20.28 "sudo timeshift --list"
|
||||
```
|
||||
|
||||
### Restore from Snapshot
|
||||
|
||||
```bash
|
||||
# Restore from a specific snapshot
|
||||
make timeshift-restore HOST=dev02 SNAPSHOT=2025-12-17_21-30-00
|
||||
|
||||
# The command will:
|
||||
# 1. Show available snapshots if SNAPSHOT is not provided
|
||||
# 2. Ask for confirmation before restoring
|
||||
# 3. Restore the system to that snapshot
|
||||
```
|
||||
|
||||
### Manual Snapshot
|
||||
|
||||
```bash
|
||||
# Create snapshot manually on host
|
||||
ssh ladmin@192.168.20.28
|
||||
sudo timeshift --create --comments "Manual snapshot before manual changes"
|
||||
```
|
||||
|
||||
### Manual Restore
|
||||
|
||||
```bash
|
||||
# SSH to host
|
||||
ssh ladmin@192.168.20.28
|
||||
|
||||
# List snapshots
|
||||
sudo timeshift --list
|
||||
|
||||
# Restore (interactive)
|
||||
sudo timeshift --restore
|
||||
|
||||
# Or restore specific snapshot (non-interactive)
|
||||
sudo timeshift --restore --snapshot '2025-12-17_21-30-00' --scripted
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Disable Auto-Snapshots
|
||||
|
||||
If you don't want automatic snapshots, disable them in `host_vars` or `group_vars`:
|
||||
|
||||
```yaml
|
||||
# inventories/production/host_vars/dev02.yml
|
||||
timeshift_auto_snapshot: false
|
||||
```
|
||||
|
||||
### Customize Snapshot Settings
|
||||
|
||||
```yaml
|
||||
# inventories/production/group_vars/dev/main.yml
|
||||
timeshift_snapshot_description: "Pre-deployment snapshot"
|
||||
timeshift_snapshot_tags: ["ansible", "deployment"]
|
||||
timeshift_keep_daily: 7
|
||||
timeshift_keep_weekly: 4
|
||||
timeshift_keep_monthly: 6
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
### Disk Space
|
||||
|
||||
- Snapshots require significant disk space (typically 10-50% of system size)
|
||||
- RSYNC snapshots are larger but work on any filesystem
|
||||
- BTRFS snapshots are smaller but require BTRFS filesystem
|
||||
- Monitor disk usage: `df -h /timeshift`
|
||||
|
||||
### What Gets Backed Up
|
||||
|
||||
By default, Timeshift backs up:
|
||||
- ✅ System files (`/etc`, `/usr`, `/boot`, etc.)
|
||||
- ✅ System configuration
|
||||
- ❌ User home directories (`/home`) - excluded by default
|
||||
- ❌ User data
|
||||
|
||||
### Recovery Process
|
||||
|
||||
1. **Boot from recovery** (if system won't boot):
|
||||
- Boot from live USB
|
||||
- Install Timeshift: `sudo apt install timeshift`
|
||||
- Run: `sudo timeshift --restore`
|
||||
|
||||
2. **Restore from running system**:
|
||||
- SSH to host
|
||||
- Run: `sudo timeshift --restore`
|
||||
- Select snapshot and confirm
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. **Always create snapshots before major changes**
|
||||
```bash
|
||||
make timeshift-snapshot HOST=dev02
|
||||
make dev HOST=dev02
|
||||
```
|
||||
|
||||
2. **Test rollback process** before you need it
|
||||
```bash
|
||||
# Create test snapshot
|
||||
make timeshift-snapshot HOST=dev02
|
||||
|
||||
# Make a test change
|
||||
# ...
|
||||
|
||||
# Practice restoring
|
||||
make timeshift-list HOST=dev02
|
||||
make timeshift-restore HOST=dev02 SNAPSHOT=<test-snapshot>
|
||||
```
|
||||
|
||||
3. **Monitor snapshot disk usage**
|
||||
```bash
|
||||
ssh ladmin@192.168.20.28 "df -h /timeshift"
|
||||
```
|
||||
|
||||
4. **Clean up old snapshots** if needed
|
||||
```bash
|
||||
ssh ladmin@192.168.20.28 "sudo timeshift --delete --snapshot 'OLD-SNAPSHOT'"
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Snapshot Creation Fails
|
||||
|
||||
```bash
|
||||
# Check Timeshift status
|
||||
ssh ladmin@192.168.20.28 "sudo timeshift --list"
|
||||
|
||||
# Check disk space
|
||||
ssh ladmin@192.168.20.28 "df -h"
|
||||
|
||||
# Check Timeshift logs
|
||||
ssh ladmin@192.168.20.28 "sudo journalctl -u timeshift"
|
||||
```
|
||||
|
||||
### Restore Fails
|
||||
|
||||
- Ensure you have enough disk space
|
||||
- Check that snapshot still exists: `sudo timeshift --list`
|
||||
- Try booting from recovery media if system won't boot
|
||||
|
||||
### Disk Full
|
||||
|
||||
```bash
|
||||
# List snapshots
|
||||
sudo timeshift --list
|
||||
|
||||
# Delete old snapshots
|
||||
sudo timeshift --delete --snapshot 'OLD-SNAPSHOT'
|
||||
|
||||
# Or configure retention in group_vars
|
||||
timeshift_keep_daily: 3 # Reduce from 7
|
||||
timeshift_keep_weekly: 2 # Reduce from 4
|
||||
```
|
||||
|
||||
## Integration with Ansible
|
||||
|
||||
The Timeshift role is automatically included in the development playbook and runs first to create snapshots before any changes are made.
|
||||
This ensures you always have a restore point.
|
||||
|
||||
```yaml
|
||||
# playbooks/development.yml
|
||||
roles:
|
||||
- {role: timeshift, tags: ['timeshift', 'snapshot']} # Runs first
|
||||
- {role: base}
|
||||
- {role: development}
|
||||
# ... other roles
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
- [Timeshift Documentation](https://github.com/teejee2008/timeshift)
|
||||
- [Ansible Vault Guide](./vault.md) - For securing passwords
|
||||
- [Maintenance Guide](../reference/makefile.md) - For system maintenance
|
||||
|
||||
@ -6,7 +6,7 @@ Ansible Vault encrypts sensitive data like passwords and API keys while keeping
|
||||
|
||||
### Create Vault
|
||||
```bash
|
||||
make create-vault
|
||||
make edit-group-vault
|
||||
```
|
||||
|
||||
### Add Secrets
|
||||
@ -38,32 +38,31 @@ database_password: "{{ vault_db_password }}"
|
||||
|
||||
## File Structure
|
||||
```
|
||||
group_vars/
|
||||
├── all.yml # Plain text configuration
|
||||
└── all/
|
||||
└── vault.yml # Encrypted secrets (created by make create-vault)
|
||||
|
||||
host_vars/
|
||||
├── dev01.yml # Host-specific plain text
|
||||
└── dev01/
|
||||
└── vault.yml # Host-specific secrets
|
||||
inventories/production/
|
||||
├── group_vars/
|
||||
│ └── all/
|
||||
│ ├── main.yml # Plain text configuration
|
||||
│ └── vault.yml # Encrypted secrets (edit with make edit-group-vault)
|
||||
└── host_vars/
|
||||
├── dev01.yml # Host-specific plain text
|
||||
└── dev01/
|
||||
└── vault.yml # Host-specific secrets (edit with make edit-vault HOST=dev01)
|
||||
```
|
||||
|
||||
## Common Commands
|
||||
|
||||
```bash
|
||||
# Create new vault
|
||||
make create-vault
|
||||
# Edit group vault (production inventory)
|
||||
make edit-group-vault
|
||||
|
||||
# Edit existing vault
|
||||
make edit-vault # Global vault
|
||||
make edit-vault HOST=dev01 # Host-specific vault
|
||||
# Edit host-specific vault
|
||||
make edit-vault HOST=dev01
|
||||
|
||||
# View decrypted contents
|
||||
ansible-vault view group_vars/all/vault.yml
|
||||
ansible-vault view inventories/production/group_vars/all/vault.yml
|
||||
|
||||
# Change vault password
|
||||
ansible-vault rekey group_vars/all/vault.yml
|
||||
ansible-vault rekey inventories/production/group_vars/all/vault.yml
|
||||
```
|
||||
|
||||
## Password Management
|
||||
|
||||
@ -97,7 +97,7 @@ Complete inventory of applications and tools deployed by Ansible playbooks.
|
||||
|
||||
## Installation by Playbook
|
||||
|
||||
### dev-playbook.yml
|
||||
### `playbooks/development.yml`
|
||||
Installs all roles for development machines:
|
||||
- All system tools
|
||||
- Development environment
|
||||
@ -107,14 +107,14 @@ Installs all roles for development machines:
|
||||
- Monitoring tools
|
||||
- Tailscale VPN
|
||||
|
||||
### local-playbook.yml
|
||||
### `playbooks/local.yml`
|
||||
Installs for local machine management:
|
||||
- Core system tools
|
||||
- Shell environment
|
||||
- Development basics
|
||||
- Selected applications
|
||||
|
||||
### maintenance-playbook.yml
|
||||
### `playbooks/maintenance.yml`
|
||||
Maintains existing installations:
|
||||
- System updates
|
||||
- Package cleanup
|
||||
|
||||
@ -80,7 +80,7 @@ Technical architecture and design of the Ansible infrastructure management syste
|
||||
### Core Playbooks
|
||||
|
||||
```yaml
|
||||
dev-playbook.yml # Development environment setup
|
||||
playbooks/development.yml # Development environment setup
|
||||
├── roles/maintenance # System updates
|
||||
├── roles/base # Core packages
|
||||
├── roles/ssh # SSH hardening
|
||||
@ -93,20 +93,24 @@ dev-playbook.yml # Development environment setup
|
||||
├── roles/tailscale # VPN setup
|
||||
├── roles/monitoring # Monitoring tools
|
||||
|
||||
local-playbook.yml # Local machine
|
||||
playbooks/local.yml # Local machine
|
||||
├── roles/base
|
||||
├── roles/shell
|
||||
├── roles/development
|
||||
└── roles/tailscale
|
||||
|
||||
maintenance-playbook.yml # System maintenance
|
||||
playbooks/maintenance.yml # System maintenance
|
||||
└── roles/maintenance
|
||||
|
||||
tailscale-playbook.yml # VPN deployment
|
||||
playbooks/tailscale.yml # VPN deployment
|
||||
└── roles/tailscale
|
||||
|
||||
proxmox-create-vm.yml # VM provisioning
|
||||
playbooks/infrastructure/proxmox-vm.yml # KVM VM provisioning (controller VM, etc.)
|
||||
└── roles/proxmox_vm
|
||||
|
||||
playbooks/app/site.yml # Proxmox app stack (LXC-first)
|
||||
├── playbooks/app/provision_vms.yml # Proxmox API provisioning (LXC/KVM)
|
||||
└── playbooks/app/configure_app.yml # Guest OS + app configuration over SSH
|
||||
```
|
||||
|
||||
### Role Dependencies
|
||||
@ -146,9 +150,9 @@ tailscale
|
||||
## Data Flow
|
||||
|
||||
### Configuration Management
|
||||
1. **Variables** → group_vars/all.yml (global)
|
||||
2. **Secrets** → group_vars/all/vault.yml (encrypted)
|
||||
3. **Host Config** → host_vars/hostname.yml (specific)
|
||||
1. **Variables** → `inventories/production/group_vars/all/main.yml`
|
||||
2. **Secrets** → `inventories/production/group_vars/all/vault.yml` (encrypted)
|
||||
3. **Host Config** → `inventories/production/host_vars/<hostname>.yml`
|
||||
4. **Role Defaults** → roles/*/defaults/main.yml
|
||||
5. **Tasks** → roles/*/tasks/main.yml
|
||||
6. **Templates** → roles/*/templates/*.j2
|
||||
|
||||
@ -58,6 +58,10 @@ Complete reference for all available `make` commands in the Ansible project.
|
||||
| Command | Description | Usage |
|
||||
|---------|-------------|-------|
|
||||
| `create-vm` | Create Ansible controller VM on Proxmox | `make create-vm` |
|
||||
| `proxmox-info` | Show Proxmox guest info (LXC/VM) | `make proxmox-info [PROJECT=projectA] [ALL=true] [TYPE=lxc\|qemu\|all]` |
|
||||
| `app-provision` | Provision app project guests on Proxmox | `make app-provision PROJECT=projectA` |
|
||||
| `app-configure` | Configure OS + app on project guests | `make app-configure PROJECT=projectA` |
|
||||
| `app` | Provision + configure app project guests | `make app PROJECT=projectA` |
|
||||
| `ping` | Ping hosts with colored output | `make ping [GROUP=dev] [HOST=dev01]` |
|
||||
| `facts` | Gather facts from all hosts | `make facts` |
|
||||
| `test-connectivity` | Test network and SSH access | `make test-connectivity` |
|
||||
@ -69,6 +73,7 @@ Complete reference for all available `make` commands in the Ansible project.
|
||||
| `copy-ssh-key` | Copy SSH key to specific host | `make copy-ssh-key HOST=giteaVM` |
|
||||
| `create-vault` | Create encrypted vault file | `make create-vault` |
|
||||
| `edit-vault` | Edit encrypted host vars | `make edit-vault HOST=dev01` |
|
||||
| `edit-group-vault` | Edit encrypted group vars (production inventory) | `make edit-group-vault` |
|
||||
|
||||
## Utility Commands
|
||||
|
||||
|
||||
@ -30,3 +30,269 @@ tailscale_accept_routes: true
|
||||
tailscale_accept_dns: true
|
||||
tailscale_ssh: false
|
||||
tailscale_hostname: "{{ inventory_hostname }}"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Proxmox + modular app projects (LXC-first)
|
||||
#
|
||||
# This repo can manage many independent apps ("projects"). Each project defines
|
||||
# its own dev/qa/prod guests (IPs/VMIDs/branches) under `app_projects`.
|
||||
#
|
||||
# Usage examples:
|
||||
# - Run one project: ansible-playbook -i inventories/production playbooks/app/site.yml -e app_project=projectA
|
||||
# - Run all projects: ansible-playbook -i inventories/production playbooks/app/site.yml
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
# Proxmox API connection (keep secrets in vault)
|
||||
proxmox_host: "{{ vault_proxmox_host }}"
|
||||
proxmox_user: "{{ vault_proxmox_user }}"
|
||||
proxmox_node: "{{ vault_proxmox_node | default('pve') }}"
|
||||
proxmox_api_port: "{{ vault_proxmox_api_port | default(8006) }}"
|
||||
# Proxmox commonly uses a self-signed cert; keep validation off by default.
|
||||
proxmox_validate_certs: false
|
||||
|
||||
# Prefer API token auth (store in vault):
|
||||
# - proxmox_token_id: "ansible@pve!token-name"
|
||||
# - vault_proxmox_token: "secret"
|
||||
proxmox_token_id: "{{ vault_proxmox_token_id | default('') }}"
|
||||
|
||||
# Default guest type for new projects. (Later you can set to `kvm` per project/env.)
|
||||
proxmox_guest_type: lxc
|
||||
|
||||
# Proxmox LXC defaults (override per project/env as needed)
|
||||
lxc_ostemplate: "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
|
||||
lxc_storage: "local-lvm"
|
||||
lxc_network_bridge: "vmbr0"
|
||||
lxc_unprivileged: true
|
||||
lxc_features_list:
|
||||
- "keyctl=1"
|
||||
- "nesting=1"
|
||||
lxc_start_after_create: true
|
||||
lxc_nameserver: "1.1.1.1 8.8.8.8"
|
||||
|
||||
# Base OS / access defaults
|
||||
appuser_name: appuser
|
||||
appuser_shell: /bin/bash
|
||||
appuser_groups:
|
||||
- sudo
|
||||
# Store your workstation public key in vault_ssh_public_key
|
||||
appuser_ssh_public_key: "{{ vault_ssh_public_key }}"
|
||||
|
||||
# App defaults (override per project)
|
||||
app_backend_port: 3001
|
||||
app_frontend_port: 3000
|
||||
|
||||
# Default Node workflow commands (override per project if your app differs)
|
||||
app_backend_install_cmd: "npm ci"
|
||||
app_backend_migrate_cmd: "npm run migrate"
|
||||
app_backend_start_cmd: "npm start"
|
||||
|
||||
app_frontend_install_cmd: "npm ci"
|
||||
app_frontend_build_cmd: "npm run build"
|
||||
app_frontend_start_cmd: "npm start"
|
||||
|
||||
# Projects definition: add as many projects as you want here.
|
||||
# Each project has envs (dev/qa/prod) defining name/vmid/ip/gateway/branch and
|
||||
# optional env_vars (dummy placeholders by default).
|
||||
#
|
||||
# -----------------------------------------------------------------------------
|
||||
# Proxmox VMID/CTID ranges (DEDICATED; avoid collisions)
|
||||
#
|
||||
# Proxmox IDs are global. Never reuse IDs across unrelated guests.
|
||||
# Suggested reservation table (edit to your preference):
|
||||
# - 9000-9099: pote
|
||||
# - 9100-9199: punimTagFE
|
||||
# - 9200-9299: punimTagBE
|
||||
# - 9300-9399: projectA (example)
|
||||
# -----------------------------------------------------------------------------
|
||||
app_projects:
|
||||
projectA:
|
||||
description: "Example full-stack app (edit repo_url, IPs, secrets)."
|
||||
repo_url: "git@github.com:example/projectA.git"
|
||||
components:
|
||||
backend: true
|
||||
frontend: true
|
||||
|
||||
|
||||
# Repo is assumed to contain `backend/` and `frontend/` directories.
|
||||
repo_dest: "/srv/app"
|
||||
|
||||
# Optional overrides for this project
|
||||
backend_port: "{{ app_backend_port }}"
|
||||
frontend_port: "{{ app_frontend_port }}"
|
||||
|
||||
guest_defaults:
|
||||
guest_type: "{{ proxmox_guest_type }}"
|
||||
cores: 2
|
||||
memory_mb: 2048
|
||||
swap_mb: 512
|
||||
rootfs_size_gb: 16
|
||||
|
||||
deploy:
|
||||
backend_install_cmd: "{{ app_backend_install_cmd }}"
|
||||
backend_migrate_cmd: "{{ app_backend_migrate_cmd }}"
|
||||
backend_start_cmd: "{{ app_backend_start_cmd }}"
|
||||
frontend_install_cmd: "{{ app_frontend_install_cmd }}"
|
||||
frontend_build_cmd: "{{ app_frontend_build_cmd }}"
|
||||
frontend_start_cmd: "{{ app_frontend_start_cmd }}"
|
||||
|
||||
envs:
|
||||
dev:
|
||||
name: "projectA-dev"
|
||||
vmid: 9301
|
||||
ip: "10.0.10.101/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "dev"
|
||||
env_vars:
|
||||
APP_ENV: "dev"
|
||||
BACKEND_BASE_URL: "http://10.0.10.101:{{ app_backend_port }}"
|
||||
FRONTEND_BASE_URL: "http://10.0.10.101:{{ app_frontend_port }}"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
qa:
|
||||
name: "projectA-qa"
|
||||
vmid: 9302
|
||||
ip: "10.0.10.102/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "qa"
|
||||
env_vars:
|
||||
APP_ENV: "qa"
|
||||
BACKEND_BASE_URL: "http://10.0.10.102:{{ app_backend_port }}"
|
||||
FRONTEND_BASE_URL: "http://10.0.10.102:{{ app_frontend_port }}"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
prod:
|
||||
name: "projectA-prod"
|
||||
vmid: 9303
|
||||
ip: "10.0.10.103/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "main"
|
||||
pote:
|
||||
description: "POTE (python/venv + cron) project (edit repo_url, IPs, secrets)."
|
||||
repo_url: "gitea@10.0.30.169:ilia/POTE.git"
|
||||
# POTE deploys as a user-owned python app (not /srv/app)
|
||||
repo_dest: "/home/poteapp/pote"
|
||||
os_user: "poteapp"
|
||||
components:
|
||||
backend: false
|
||||
frontend: false
|
||||
guest_defaults:
|
||||
guest_type: "{{ proxmox_guest_type }}"
|
||||
cores: 2
|
||||
memory_mb: 2048
|
||||
swap_mb: 512
|
||||
rootfs_size_gb: 16
|
||||
# POTE-specific optional defaults (override per env if needed)
|
||||
pote_db_host: "localhost"
|
||||
pote_db_user: "poteuser"
|
||||
pote_db_name: "potedb"
|
||||
pote_smtp_host: "mail.levkin.ca"
|
||||
pote_smtp_port: 587
|
||||
envs:
|
||||
dev:
|
||||
name: "pote-dev"
|
||||
vmid: 9001
|
||||
ip: "10.0.10.114/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "dev"
|
||||
qa:
|
||||
name: "pote-qa"
|
||||
vmid: 9002
|
||||
ip: "10.0.10.112/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "qa"
|
||||
prod:
|
||||
name: "pote-prod"
|
||||
vmid: 9003
|
||||
ip: "10.0.10.113/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "main"
|
||||
|
||||
punimTagFE:
|
||||
description: "punimTag frontend-only project (edit repo_url, IPs, secrets)."
|
||||
repo_url: "git@github.com:example/punimTagFE.git"
|
||||
repo_dest: "/srv/app"
|
||||
components:
|
||||
backend: false
|
||||
frontend: true
|
||||
guest_defaults:
|
||||
guest_type: "{{ proxmox_guest_type }}"
|
||||
cores: 2
|
||||
memory_mb: 2048
|
||||
swap_mb: 512
|
||||
rootfs_size_gb: 16
|
||||
deploy:
|
||||
frontend_install_cmd: "{{ app_frontend_install_cmd }}"
|
||||
frontend_build_cmd: "{{ app_frontend_build_cmd }}"
|
||||
frontend_start_cmd: "{{ app_frontend_start_cmd }}"
|
||||
envs:
|
||||
dev:
|
||||
name: "punimTagFE-dev"
|
||||
vmid: 9101
|
||||
ip: "10.0.10.121/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "dev"
|
||||
env_vars:
|
||||
APP_ENV: "dev"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
qa:
|
||||
name: "punimTagFE-qa"
|
||||
vmid: 9102
|
||||
ip: "10.0.10.122/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "qa"
|
||||
env_vars:
|
||||
APP_ENV: "qa"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
prod:
|
||||
name: "punimTagFE-prod"
|
||||
vmid: 9103
|
||||
ip: "10.0.10.123/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "main"
|
||||
env_vars:
|
||||
APP_ENV: "prod"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
|
||||
punimTagBE:
|
||||
description: "punimTag backend-only project (edit repo_url, IPs, secrets)."
|
||||
repo_url: "git@github.com:example/punimTagBE.git"
|
||||
repo_dest: "/srv/app"
|
||||
components:
|
||||
backend: true
|
||||
frontend: false
|
||||
guest_defaults:
|
||||
guest_type: "{{ proxmox_guest_type }}"
|
||||
cores: 2
|
||||
memory_mb: 2048
|
||||
swap_mb: 512
|
||||
rootfs_size_gb: 16
|
||||
deploy:
|
||||
backend_install_cmd: "{{ app_backend_install_cmd }}"
|
||||
backend_migrate_cmd: "{{ app_backend_migrate_cmd }}"
|
||||
backend_start_cmd: "{{ app_backend_start_cmd }}"
|
||||
envs:
|
||||
dev:
|
||||
name: "punimTagBE-dev"
|
||||
vmid: 9201
|
||||
ip: "10.0.10.131/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "dev"
|
||||
env_vars:
|
||||
APP_ENV: "dev"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
qa:
|
||||
name: "punimTagBE-qa"
|
||||
vmid: 9202
|
||||
ip: "10.0.10.132/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "qa"
|
||||
env_vars:
|
||||
APP_ENV: "qa"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
prod:
|
||||
name: "punimTagBE-prod"
|
||||
vmid: 9203
|
||||
ip: "10.0.10.133/24"
|
||||
gateway: "10.0.10.1"
|
||||
branch: "main"
|
||||
env_vars:
|
||||
APP_ENV: "prod"
|
||||
SECRET_PLACEHOLDER: "change-me"
|
||||
|
||||
42
inventories/production/group_vars/all/vault.example.yml
Normal file
42
inventories/production/group_vars/all/vault.example.yml
Normal file
@ -0,0 +1,42 @@
|
||||
---
|
||||
# Example vault values for Proxmox app projects.
|
||||
#
|
||||
# Copy required keys into your encrypted vault:
|
||||
# make edit-group-vault
|
||||
#
|
||||
# Never commit real secrets unencrypted.
|
||||
|
||||
# Proxmox API
|
||||
vault_proxmox_host: "10.0.10.201"
|
||||
vault_proxmox_user: "root@pam"
|
||||
vault_proxmox_node: "pve"
|
||||
vault_proxmox_password: "CHANGE_ME"
|
||||
|
||||
# Optional token auth (recommended if you use it)
|
||||
# vault_proxmox_token_id: "root@pam!ansible"
|
||||
# vault_proxmox_token: "CHANGE_ME"
|
||||
|
||||
# SSH public key for appuser (workstation key)
|
||||
vault_ssh_public_key: "ssh-ed25519 AAAA... you@example"
|
||||
|
||||
# LXC create bootstrap password (often required by Proxmox)
|
||||
vault_lxc_root_password: "CHANGE_ME"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# POTE (python/venv + cron) secrets
|
||||
# -----------------------------------------------------------------------------
|
||||
# Private key used for cloning from Gitea (deploy key). Store as a multi-line block.
|
||||
vault_pote_git_ssh_key: |
|
||||
-----BEGIN OPENSSH PRIVATE KEY-----
|
||||
CHANGE_ME
|
||||
-----END OPENSSH PRIVATE KEY-----
|
||||
|
||||
# Environment-specific DB passwords (used by roles/pote)
|
||||
vault_pote_db_password_dev: "CHANGE_ME"
|
||||
vault_pote_db_password_qa: "CHANGE_ME"
|
||||
vault_pote_db_password_prod: "CHANGE_ME"
|
||||
|
||||
# SMTP password for reports
|
||||
vault_pote_smtp_password: "CHANGE_ME"
|
||||
|
||||
|
||||
@ -1,10 +1,47 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
36343265643238633236643162613137393331386164306133666537633336633036376433386161
|
||||
3135366566623235333264386539346364333435373065300a633231633731316633313166346161
|
||||
30363334613965666634633665363632323966396464633636346533616634393664386566333230
|
||||
3463666531323866660a666238383331383562313363386639646161653334313661393065343135
|
||||
33613762653361656633366465306264323935363032353737333935363165346639616330333939
|
||||
39336538643866366361313838636338643336376365373166376234383838656430623339313162
|
||||
37353461313263643263376232393138396233366234336333613535366234383661353938663032
|
||||
65383737343164343431363764333063326230623263323231366232626131306637353361343466
|
||||
6131
|
||||
36643038376636383030343730626264613839396462366365633837636130623639393361656634
|
||||
3238353261633635353662653036393835313963373562390a646535376366656163383632313835
|
||||
39646666653362336661633736333365343962346432653131613134353361366263373162386631
|
||||
3134613438626132320a313765343338643535343837306339616564336564303166626164356530
|
||||
63663363656535303137663431613861343662303664313332626166373463393931323937613230
|
||||
66333665316331323637663437653339353737653336633864393033336630336438646162643662
|
||||
31656164363933333036376263303034646366393134636630663631353235373831303264363762
|
||||
66643865616130306537383836646237613730643133656333666632326538613764383530363363
|
||||
61386161646637316166303633643665383365346534323939383034613430386362303038313761
|
||||
36303364396436373466653332303562653038373962616539356633373065643130303036363161
|
||||
65353163326136383066393332376236386333653532326337613163346334616234643562643265
|
||||
62316134386365343733636661336130623364386634383965386135616633323132643365613231
|
||||
34636435333031376136396336316337666161383562343834383865316436633333333065323138
|
||||
37343865363731303137666330306131373734623637343531623562353332353437646631343363
|
||||
30393565376435303430396535643165616534313334326462363130626639343038643835336335
|
||||
33613630336534666163356631353438373462306566376134323536373832643264633365653465
|
||||
62386363326436623330653430383262653732376235626432656362306363303663623834653664
|
||||
31373762306539376431353137393664396165396261613364653339373765393863633833396131
|
||||
36666235666234633430373338323331313531643736656137303937653865303431643164373161
|
||||
39633238383265396366386230303536613461633431333565353433643935613231333232333063
|
||||
36643435376165656262623863373039393837643564366531666462376162653630626634663037
|
||||
39373439336239646131306133663566343734656339346462356662373561306264333364383966
|
||||
38343463616666613037636335333137633737666166633364343736646232396566373866633531
|
||||
34303734376137386363373039656565323364333539626630323465666636396465323861333365
|
||||
35376161663630356132373638333937376164316361303531303637396334306133373237656265
|
||||
36356532623130323565396531306136363339363437376364343138653139653335343765316365
|
||||
38313035366137393365316139326236326330386365343665376335313339666231333632333133
|
||||
32353865626531373462346261653832386234396531653136323162653865303861396233376261
|
||||
34616232363965313635373833333737336166643734373633313865323066393930666562316136
|
||||
36373763356365646361656436383463393237623461383531343134373336663763663464336361
|
||||
38396532383932643065303731663565353366373033353237383538636365323064396531386134
|
||||
61643964613930373439383032373364316437303239393434376465393639373634663738623461
|
||||
37386366616333626434363761326361373533306635316164316363393264303633353939613239
|
||||
37353266303637323139653630663236663633313061306633316139666539376632306630313362
|
||||
34633834326433646230303634313266303530633236353262633066396462646365623935343161
|
||||
34393166643666366164313438383939386434366665613330653739383139613732396633383261
|
||||
33633664303131383163356362316639353064373861343132623565636631333135663034373461
|
||||
61303031616634333235303066633939643337393862653031323936363932633438303035323238
|
||||
66323066353737316166383533636661336637303265343937633064626164623462656134333732
|
||||
33316536336430636636646561626232666633656266326339623732363531326131643764313838
|
||||
62356537326166346666313930383639386466633432626235373738633833393164646238366465
|
||||
62373938363739373036666238666433303061633732663565666433333631326432626461353037
|
||||
39636263636632313431353364386566383134653139393762623562643561616166633035353038
|
||||
39326462356332616563303462636536636132633933336532383938373030666333363264346632
|
||||
64643063373830353130613662323131353964313038323735626464313363326364653732323732
|
||||
3663393964633138376665323435366463623463613237366465
|
||||
|
||||
9
inventories/production/group_vars/dev/main.yml
Normal file
9
inventories/production/group_vars/dev/main.yml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
# Development group overrides
|
||||
# Development machines may need more permissive SSH settings
|
||||
|
||||
# Allow root login for initial setup (can be disabled after setup)
|
||||
ssh_permit_root_login: 'yes'
|
||||
|
||||
# Allow password authentication for initial setup (should be disabled after SSH keys are set up)
|
||||
ssh_password_authentication: 'yes'
|
||||
10
inventories/production/host_vars/KrakenMint.yml
Normal file
10
inventories/production/host_vars/KrakenMint.yml
Normal file
@ -0,0 +1,10 @@
|
||||
---
|
||||
# Host variables for KrakenMint
|
||||
|
||||
# Using root user directly, password will be prompted
|
||||
ansible_become: true
|
||||
|
||||
# Configure shell for root
|
||||
shell_users:
|
||||
- ladmin
|
||||
|
||||
8
inventories/production/host_vars/KrakenMint/vault.yml
Normal file
8
inventories/production/host_vars/KrakenMint/vault.yml
Normal file
@ -0,0 +1,8 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
39353931333431383166336133363735336334376339646261353331323162343663386265393337
|
||||
3761626465643830323333613065316361623839363439630a653563306462313663393432306135
|
||||
61383936326637366635373563623038623866643230356164336436666535626239346163323665
|
||||
6339623335643238660a303031363233396466326333613831366265363839313435366235663139
|
||||
35616161333063363035326636353936633465613865313033393331313662303436646537613665
|
||||
39616336363533633833383266346562373161656332363237343665316337353764386661333664
|
||||
336163353333613762626533333437376637
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
31306264346663636630656534303766666564333866326139336137383339633338323834653266
|
||||
6132333337363566623265303037336266646238633036390a663432623861363562386561393264
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
66633265383239626163633134656233613638643862323562373330643363323036333334646566
|
||||
3439646635343533353432323064643135623532333738380a353866643461636233376432396434
|
||||
|
||||
16
inventories/production/host_vars/dev02.yml
Normal file
16
inventories/production/host_vars/dev02.yml
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
# Host variables for dev02
|
||||
|
||||
# Use ladmin user with sudo to become root
|
||||
ansible_become: true
|
||||
ansible_become_method: sudo
|
||||
ansible_become_password: "{{ vault_dev02_become_password }}"
|
||||
|
||||
# Configure shell for ladmin
|
||||
shell_users:
|
||||
- ladmin
|
||||
|
||||
# Skip data science stack
|
||||
install_conda: false
|
||||
install_jupyter: false
|
||||
install_r: false
|
||||
@ -1,4 +1,5 @@
|
||||
ansible_become_password: root
|
||||
---
|
||||
ansible_become_password: "{{ vault_devgpu_become_password }}"
|
||||
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
@ -9,7 +10,7 @@ shell_additional_users:
|
||||
- devuser01
|
||||
- devuser02
|
||||
- dev
|
||||
|
||||
|
||||
# Data Science configuration (datascience role)
|
||||
install_conda: true
|
||||
conda_install_path: "/root/anaconda3"
|
||||
|
||||
2
inventories/production/host_vars/devGPU/vault.yml
Normal file
2
inventories/production/host_vars/devGPU/vault.yml
Normal file
@ -0,0 +1,2 @@
|
||||
---
|
||||
vault_devgpu_become_password: root
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
# Configure sudo path for git-ci-01
|
||||
# Sudo may not be in PATH for non-interactive shells
|
||||
ansible_become_exe: /usr/bin/sudo
|
||||
@ -5,4 +6,3 @@ ansible_become_method: sudo
|
||||
|
||||
# Alternative: if sudo is in a different location, update this
|
||||
# ansible_become_exe: /usr/local/bin/sudo
|
||||
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
61623232353833613730343036663434633265346638366431383737623936616131356661616238
|
||||
3230346138373030396336663566353433396230346434630a313633633161303539373965343466
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
31316663336338303832323464623866343366313261653536623233303466636630633235643638
|
||||
3666646431323061313836333233356162643462323763380a623666663062386337393439653134
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
62356361353835643235613335613661356230666539386533383536623432316333346431343462
|
||||
3265376632633731623430376333323234633962643766380a363033666334643930326636343963
|
||||
|
||||
@ -7,4 +7,3 @@ ansible_become_method: sudo
|
||||
# Configure shell for ladmin user
|
||||
shell_users:
|
||||
- ladmin
|
||||
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
35633833353965363964376161393730613065663236326239376562356231316166656131366263
|
||||
6263363436373965316339623139353830643062393165370a643138356561613537616431316534
|
||||
|
||||
@ -2,26 +2,22 @@
|
||||
# Primary IPs: Tailscale (100.x.x.x) for remote access
|
||||
# Fallback IPs: Local network (10.0.x.x) when Tailscale is down
|
||||
# Usage: ansible_host_fallback is available for manual fallback
|
||||
|
||||
[gitea]
|
||||
giteaVM ansible_host=10.0.30.169 ansible_user=root
|
||||
|
||||
[portainer]
|
||||
portainerVM ansible_host=10.0.30.69 ansible_user=ladmin
|
||||
|
||||
[homepage]
|
||||
homepageVM ansible_host=10.0.30.12 ansible_user=homepage
|
||||
|
||||
[vaultwarden]
|
||||
vaultwardenVM ansible_host=10.0.10.142 ansible_user=root
|
||||
#
|
||||
# NOTE: Proxmox app projects (dev/qa/prod) are provisioned dynamically via
|
||||
# `playbooks/app/site.yml` (it uses `add_host` based on `app_projects`).
|
||||
# You generally do NOT need to add project hosts here.
|
||||
|
||||
[dev]
|
||||
dev01 ansible_host=10.0.30.105 ansible_user=ladmin
|
||||
bottom ansible_host=10.0.10.156 ansible_user=beast
|
||||
debianDesktopVM ansible_host=10.0.10.206 ansible_user=user skip_reboot=true
|
||||
devGPU ansible_host=10.0.30.63 ansible_user=root
|
||||
|
||||
[qa]
|
||||
git-ci-01 ansible_host=10.0.10.223 ansible_user=ladmin
|
||||
sonarqube-01 ansible_host=10.0.10.54 ansible_user=ladmin
|
||||
dev02 ansible_host=10.0.10.100 ansible_user=ladmin
|
||||
KrakenMint ansible_host=10.0.10.120 ansible_user=ladmin
|
||||
|
||||
[ansible]
|
||||
ansibleVM ansible_host=10.0.10.157 ansible_user=master
|
||||
@ -34,8 +30,14 @@ caddy ansible_host=10.0.10.50 ansible_user=root
|
||||
jellyfin ansible_host=10.0.10.232 ansible_user=root
|
||||
listmonk ansible_host=10.0.10.149 ansible_user=root
|
||||
nextcloud ansible_host=10.0.10.25 ansible_user=root
|
||||
actual ansible_host=10.0.10.159 ansible_user=root
|
||||
actual ansible_host=10.0.10.158 ansible_user=root
|
||||
vikanjans ansible_host=10.0.10.159 ansible_user=root
|
||||
n8n ansible_host=10.0.10.158 ansible_user=root
|
||||
giteaVM ansible_host=10.0.30.169 ansible_user=root
|
||||
portainerVM ansible_host=10.0.30.69 ansible_user=ladmin
|
||||
homepageVM ansible_host=10.0.30.12 ansible_user=homepage
|
||||
vaultwardenVM ansible_host=10.0.10.142 ansible_user=ladmin
|
||||
qBittorrent ansible_host=10.0.10.91 ansible_user=root port=8080
|
||||
|
||||
[desktop]
|
||||
desktop-beast ansible_host=100.117.34.106 ansible_user=beast
|
||||
|
||||
2
package-lock.json
generated
2
package-lock.json
generated
@ -13,7 +13,7 @@
|
||||
"markdownlint-cli2": "^0.18.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=22.0.0",
|
||||
"node": ">=20.0.0",
|
||||
"npm": ">=10.0.0"
|
||||
}
|
||||
},
|
||||
|
||||
134
playbooks/app/configure_app.yml
Normal file
134
playbooks/app/configure_app.yml
Normal file
@ -0,0 +1,134 @@
|
||||
---
|
||||
# Playbook: app/configure_app.yml
|
||||
# Purpose: Configure OS + app runtime on app project guests created via provision_vms.yml
|
||||
# Targets: app_all or per-project group created dynamically
|
||||
# Tags: app, configure
|
||||
#
|
||||
# Usage:
|
||||
# - Run one project: ansible-playbook -i inventories/production playbooks/app/site.yml -e app_project=projectA
|
||||
# - Run all projects: ansible-playbook -i inventories/production playbooks/app/site.yml
|
||||
|
||||
- name: Build dynamic inventory from app_projects (so configure can run standalone)
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: false
|
||||
vars:
|
||||
selected_projects: >-
|
||||
{{
|
||||
(app_projects | dict2items | map(attribute='key') | list)
|
||||
if (app_project is not defined or app_project | length == 0)
|
||||
else [app_project]
|
||||
}}
|
||||
app_bootstrap_user_default: root
|
||||
# If true, configure plays will use vault_lxc_root_password for initial SSH bootstrap.
|
||||
bootstrap_with_root_password_default: false
|
||||
tasks:
|
||||
- name: Validate requested project exists
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- app_project is not defined or app_project in app_projects
|
||||
fail_msg: "Requested app_project={{ app_project }} does not exist in app_projects."
|
||||
|
||||
- name: Add each project/env host (by static IP) to dynamic inventory
|
||||
ansible.builtin.add_host:
|
||||
name: "{{ app_projects[item.0].envs[item.1].name | default(item.0 ~ '-' ~ item.1) }}"
|
||||
groups:
|
||||
- "app_all"
|
||||
- "app_{{ item.0 }}_all"
|
||||
- "app_{{ item.0 }}_{{ item.1 }}"
|
||||
ansible_host: "{{ (app_projects[item.0].envs[item.1].ip | string).split('/')[0] }}"
|
||||
ansible_user: "{{ app_bootstrap_user | default(app_bootstrap_user_default) }}"
|
||||
ansible_password: >-
|
||||
{{
|
||||
vault_lxc_root_password
|
||||
if ((bootstrap_with_root_password | default(bootstrap_with_root_password_default) | bool) and (vault_lxc_root_password | default('') | length) > 0)
|
||||
else omit
|
||||
}}
|
||||
app_project: "{{ item.0 }}"
|
||||
app_env: "{{ item.1 }}"
|
||||
loop: "{{ selected_projects | product(['dev', 'qa', 'prod']) | list }}"
|
||||
when:
|
||||
- app_projects[item.0] is defined
|
||||
- app_projects[item.0].envs[item.1] is defined
|
||||
- (app_projects[item.0].envs[item.1].ip | default('')) | length > 0
|
||||
|
||||
- name: Configure app guests (base OS + app setup)
|
||||
hosts: >-
|
||||
{{
|
||||
('app_' ~ app_project ~ '_all')
|
||||
if (app_project is defined and app_project | length > 0)
|
||||
else 'app_all'
|
||||
}}
|
||||
become: true
|
||||
gather_facts: true
|
||||
|
||||
tasks:
|
||||
- name: Build project/env effective variables
|
||||
ansible.builtin.set_fact:
|
||||
project_def: "{{ app_projects[app_project] }}"
|
||||
env_def: "{{ app_projects[app_project].envs[app_env] }}"
|
||||
when: app_project is defined and app_env is defined
|
||||
|
||||
- name: Configure base OS
|
||||
ansible.builtin.include_role:
|
||||
name: base_os
|
||||
vars:
|
||||
base_os_backend_port: "{{ (project_def.backend_port | default(app_backend_port)) if project_def is defined else app_backend_port }}"
|
||||
base_os_frontend_port: "{{ (project_def.frontend_port | default(app_frontend_port)) if project_def is defined else app_frontend_port }}"
|
||||
base_os_enable_backend: "{{ project_def.components.backend | default(true) }}"
|
||||
base_os_enable_frontend: "{{ project_def.components.frontend | default(true) }}"
|
||||
base_os_user: "{{ project_def.os_user | default(appuser_name) }}"
|
||||
base_os_user_ssh_public_key: "{{ project_def.os_user_ssh_public_key | default(appuser_ssh_public_key | default('')) }}"
|
||||
# Only override when explicitly provided (avoids self-referential recursion)
|
||||
base_os_packages: "{{ project_def.base_os_packages if (project_def is defined and project_def.base_os_packages is defined) else omit }}"
|
||||
|
||||
- name: Configure POTE (python/venv + cron)
|
||||
ansible.builtin.include_role:
|
||||
name: pote
|
||||
vars:
|
||||
pote_git_repo: "{{ project_def.repo_url }}"
|
||||
pote_git_branch: "{{ env_def.branch }}"
|
||||
pote_user: "{{ project_def.os_user | default('poteapp') }}"
|
||||
pote_group: "{{ project_def.os_user | default('poteapp') }}"
|
||||
pote_app_dir: "{{ project_def.repo_dest | default('/home/' ~ (project_def.os_user | default('poteapp')) ~ '/pote') }}"
|
||||
pote_env: "{{ app_env }}"
|
||||
|
||||
pote_db_host: "{{ env_def.pote_db_host | default(project_def.pote_db_host | default('localhost')) }}"
|
||||
pote_db_name: "{{ env_def.pote_db_name | default(project_def.pote_db_name | default('potedb')) }}"
|
||||
pote_db_user: "{{ env_def.pote_db_user | default(project_def.pote_db_user | default('poteuser')) }}"
|
||||
|
||||
pote_smtp_host: "{{ env_def.pote_smtp_host | default(project_def.pote_smtp_host | default('mail.levkin.ca')) }}"
|
||||
pote_smtp_port: "{{ env_def.pote_smtp_port | default(project_def.pote_smtp_port | default(587)) }}"
|
||||
pote_smtp_user: "{{ env_def.pote_smtp_user | default(project_def.pote_smtp_user | default('')) }}"
|
||||
pote_from_email: "{{ env_def.pote_from_email | default(project_def.pote_from_email | default('')) }}"
|
||||
pote_report_recipients: "{{ env_def.pote_report_recipients | default(project_def.pote_report_recipients | default('')) }}"
|
||||
when: app_project == 'pote'
|
||||
|
||||
- name: Configure app layout + deploy + systemd
|
||||
ansible.builtin.include_role:
|
||||
name: app_setup
|
||||
vars:
|
||||
app_repo_url: "{{ project_def.repo_url }}"
|
||||
app_repo_dest: "{{ project_def.repo_dest | default('/srv/app') }}"
|
||||
app_repo_branch: "{{ env_def.branch }}"
|
||||
# app_env is already set per-host via add_host (dev/qa/prod)
|
||||
app_owner: "{{ project_def.os_user | default(appuser_name) }}"
|
||||
app_group: "{{ project_def.os_user | default(appuser_name) }}"
|
||||
|
||||
app_backend_port: "{{ project_def.backend_port | default(app_backend_port) }}"
|
||||
app_frontend_port: "{{ project_def.frontend_port | default(app_frontend_port) }}"
|
||||
app_enable_backend: "{{ project_def.components.backend | default(true) }}"
|
||||
app_enable_frontend: "{{ project_def.components.frontend | default(true) }}"
|
||||
|
||||
app_backend_install_cmd: "{{ project_def.deploy.backend_install_cmd | default(app_backend_install_cmd) }}"
|
||||
app_backend_migrate_cmd: "{{ project_def.deploy.backend_migrate_cmd | default(app_backend_migrate_cmd) }}"
|
||||
app_backend_start_cmd: "{{ project_def.deploy.backend_start_cmd | default(app_backend_start_cmd) }}"
|
||||
|
||||
app_frontend_install_cmd: "{{ project_def.deploy.frontend_install_cmd | default(app_frontend_install_cmd) }}"
|
||||
app_frontend_build_cmd: "{{ project_def.deploy.frontend_build_cmd | default(app_frontend_build_cmd) }}"
|
||||
app_frontend_start_cmd: "{{ project_def.deploy.frontend_start_cmd | default(app_frontend_start_cmd) }}"
|
||||
|
||||
app_env_vars: "{{ env_def.env_vars | default({}) }}"
|
||||
when: app_project != 'pote'
|
||||
|
||||
|
||||
235
playbooks/app/provision_one_env.yml
Normal file
235
playbooks/app/provision_one_env.yml
Normal file
@ -0,0 +1,235 @@
|
||||
---
|
||||
# Helper tasks file for playbooks/app/provision_vms.yml
|
||||
# Provisions a single (project, env) guest and adds it to dynamic inventory.
|
||||
|
||||
- name: Set environment facts
|
||||
ansible.builtin.set_fact:
|
||||
env_name: "{{ env_item.key }}"
|
||||
env_def: "{{ env_item.value }}"
|
||||
guest_name: "{{ env_item.value.name | default(project_key ~ '-' ~ env_item.key) }}"
|
||||
# vmid is optional; if omitted, we will manage idempotency by unique guest_name
|
||||
guest_vmid: "{{ env_item.value.vmid | default(none) }}"
|
||||
|
||||
- name: Normalize recreate_existing_envs to a list
|
||||
ansible.builtin.set_fact:
|
||||
recreate_envs_list: >-
|
||||
{{
|
||||
(recreate_existing_envs.split(',') | map('trim') | list)
|
||||
if (recreate_existing_envs is defined and recreate_existing_envs is string)
|
||||
else (recreate_existing_envs | default([]))
|
||||
}}
|
||||
|
||||
- name: Check if Proxmox guest already exists (by VMID when provided)
|
||||
community.proxmox.proxmox_vm_info:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_port: "{{ proxmox_api_port | default(8006) }}"
|
||||
validate_certs: "{{ proxmox_validate_certs | default(false) }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password | default(omit) }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
type: lxc
|
||||
vmid: "{{ guest_vmid }}"
|
||||
register: proxmox_guest_info_vmid
|
||||
when: guest_vmid is not none
|
||||
|
||||
- name: Check if Proxmox guest already exists (by name when VMID omitted)
|
||||
community.proxmox.proxmox_vm_info:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_port: "{{ proxmox_api_port | default(8006) }}"
|
||||
validate_certs: "{{ proxmox_validate_certs | default(false) }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password | default(omit) }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
type: lxc
|
||||
name: "{{ guest_name }}"
|
||||
register: proxmox_guest_info_name
|
||||
when: guest_vmid is none
|
||||
|
||||
- name: Set guest_exists fact
|
||||
ansible.builtin.set_fact:
|
||||
guest_exists: >-
|
||||
{{
|
||||
((proxmox_guest_info_vmid.proxmox_vms | default([])) | length > 0)
|
||||
if (guest_vmid is not none)
|
||||
else ((proxmox_guest_info_name.proxmox_vms | default([])) | length > 0)
|
||||
}}
|
||||
|
||||
- name: "Guardrail: abort if VMID exists but name does not match (prevents overwriting other guests)"
|
||||
ansible.builtin.fail:
|
||||
msg: >-
|
||||
Refusing to use VMID {{ guest_vmid }} for {{ guest_name }} because it already exists as
|
||||
"{{ (proxmox_guest_info_vmid.proxmox_vms[0].name | default('UNKNOWN')) }}".
|
||||
Pick a different vmid range in app_projects or omit vmid to auto-allocate.
|
||||
when:
|
||||
- guest_vmid is not none
|
||||
- (proxmox_guest_info_vmid.proxmox_vms | default([])) | length > 0
|
||||
- (proxmox_guest_info_vmid.proxmox_vms[0].name | default('')) != guest_name
|
||||
- not (allow_vmid_collision | default(false) | bool)
|
||||
|
||||
- name: Delete existing guest if requested (recreate)
|
||||
community.proxmox.proxmox:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_port: "{{ proxmox_api_port | default(8006) }}"
|
||||
validate_certs: "{{ proxmox_validate_certs | default(false) }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password | default(omit) }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
vmid: "{{ guest_vmid }}"
|
||||
purge: true
|
||||
force: true
|
||||
state: absent
|
||||
when:
|
||||
- guest_exists | bool
|
||||
- guest_vmid is not none
|
||||
- recreate_existing_guests | default(false) | bool or (env_name in recreate_envs_list)
|
||||
|
||||
- name: Mark guest as not existing after delete
|
||||
ansible.builtin.set_fact:
|
||||
guest_exists: false
|
||||
when:
|
||||
- guest_vmid is not none
|
||||
- recreate_existing_guests | default(false) | bool or (env_name in recreate_envs_list)
|
||||
|
||||
- name: "Preflight: detect IP conflicts on Proxmox (existing LXC net0 ip=)"
|
||||
community.proxmox.proxmox_vm_info:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_port: "{{ proxmox_api_port | default(8006) }}"
|
||||
validate_certs: "{{ proxmox_validate_certs | default(false) }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password | default(omit) }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
type: lxc
|
||||
config: current
|
||||
register: proxmox_all_lxc
|
||||
when:
|
||||
- (env_def.ip | default('')) | length > 0
|
||||
- not (allow_ip_conflicts | default(false) | bool)
|
||||
- not (guest_exists | default(false) | bool)
|
||||
|
||||
- name: Set proxmox_ip_conflicts fact
|
||||
ansible.builtin.set_fact:
|
||||
proxmox_ip_conflicts: >-
|
||||
{%- set conflicts = [] -%}
|
||||
{%- set target_ip = ((env_def.ip | string).split('/')[0]) -%}
|
||||
{%- for vm in (proxmox_all_lxc.proxmox_vms | default([])) -%}
|
||||
{%- set cfg_net0 = (
|
||||
vm['config']['net0']
|
||||
if (
|
||||
vm is mapping and ('config' in vm)
|
||||
and (vm['config'] is mapping) and ('net0' in vm['config'])
|
||||
)
|
||||
else none
|
||||
) -%}
|
||||
{%- set vm_netif = (vm['netif'] if (vm is mapping and ('netif' in vm)) else none) -%}
|
||||
{%- set net0 = (
|
||||
cfg_net0
|
||||
if (cfg_net0 is not none)
|
||||
else (
|
||||
vm_netif['net0']
|
||||
if (vm_netif is mapping and ('net0' in vm_netif))
|
||||
else (
|
||||
vm_netif
|
||||
if (vm_netif is string)
|
||||
else (vm['net0'] if (vm is mapping and ('net0' in vm)) else '')
|
||||
)
|
||||
)
|
||||
) | string -%}
|
||||
{%- set vm_ip = (net0 | regex_search('(?:^|,)ip=([^,]+)', '\\1') | default('')) | regex_replace('/.*$', '') -%}
|
||||
{%- if (vm_ip | length) > 0 and vm_ip == target_ip -%}
|
||||
{%- set _ = conflicts.append({'vmid': (vm.vmid | default('') | string), 'name': (vm.name | default('') | string), 'net0': net0}) -%}
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
{{ conflicts }}
|
||||
when:
|
||||
- proxmox_all_lxc is defined
|
||||
- (env_def.ip | default('')) | length > 0
|
||||
- not (allow_ip_conflicts | default(false) | bool)
|
||||
- not (guest_exists | default(false) | bool)
|
||||
|
||||
- name: Abort if IP is already assigned to an existing Proxmox LXC
|
||||
ansible.builtin.fail:
|
||||
msg: >-
|
||||
Refusing to provision {{ guest_name }} because IP {{ (env_def.ip | string).split('/')[0] }}
|
||||
is already present in Proxmox LXC net0 config: {{ proxmox_ip_conflicts }}.
|
||||
Fix app_projects IPs or set -e allow_ip_conflicts=true.
|
||||
when:
|
||||
- (env_def.ip | default('')) | length > 0
|
||||
- not (allow_ip_conflicts | default(false) | bool)
|
||||
- not (guest_exists | default(false) | bool)
|
||||
- (proxmox_ip_conflicts | default([])) | length > 0
|
||||
|
||||
- name: "Preflight: fail if target IP responds (avoid accidental duplicate IP)"
|
||||
ansible.builtin.command: "ping -c 1 -W 1 {{ (env_def.ip | string).split('/')[0] }}"
|
||||
register: ip_ping
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when:
|
||||
- (env_def.ip | default('')) | length > 0
|
||||
- not (allow_ip_conflicts | default(false) | bool)
|
||||
- not (guest_exists | default(false) | bool)
|
||||
|
||||
- name: Abort if IP appears to be in use
|
||||
ansible.builtin.fail:
|
||||
msg: >-
|
||||
Refusing to provision {{ guest_name }} because IP {{ (env_def.ip | string).split('/')[0] }}
|
||||
responded to ping. Fix app_projects IPs or set -e allow_ip_conflicts=true.
|
||||
Note: this guardrail is ping-based; if your network blocks ICMP, an in-use IP may not respond.
|
||||
when:
|
||||
- (env_def.ip | default('')) | length > 0
|
||||
- not (allow_ip_conflicts | default(false) | bool)
|
||||
- not (guest_exists | default(false) | bool)
|
||||
- ip_ping.rc == 0
|
||||
|
||||
- name: Provision LXC guest for project/env
|
||||
ansible.builtin.include_role:
|
||||
name: proxmox_vm
|
||||
vars:
|
||||
# NOTE: Use hostvars['localhost'] for defaults to avoid recursive self-references
|
||||
proxmox_guest_type: "{{ project_def.guest_defaults.guest_type | default(hostvars['localhost'].proxmox_guest_type | default('lxc')) }}"
|
||||
|
||||
# Only pass vmid when provided; otherwise Proxmox will auto-allocate
|
||||
lxc_vmid: "{{ guest_vmid if guest_vmid is not none else omit }}"
|
||||
lxc_hostname: "{{ guest_name }}"
|
||||
lxc_ostemplate: "{{ project_def.lxc_ostemplate | default(hostvars['localhost'].lxc_ostemplate) }}"
|
||||
lxc_storage: "{{ project_def.lxc_storage | default(hostvars['localhost'].lxc_storage) }}"
|
||||
lxc_network_bridge: "{{ project_def.lxc_network_bridge | default(hostvars['localhost'].lxc_network_bridge) }}"
|
||||
lxc_unprivileged: "{{ project_def.lxc_unprivileged | default(hostvars['localhost'].lxc_unprivileged) }}"
|
||||
lxc_features_list: "{{ project_def.lxc_features_list | default(hostvars['localhost'].lxc_features_list) }}"
|
||||
|
||||
lxc_cores: "{{ project_def.guest_defaults.cores | default(hostvars['localhost'].lxc_cores) }}"
|
||||
lxc_memory_mb: "{{ project_def.guest_defaults.memory_mb | default(hostvars['localhost'].lxc_memory_mb) }}"
|
||||
lxc_swap_mb: "{{ project_def.guest_defaults.swap_mb | default(hostvars['localhost'].lxc_swap_mb) }}"
|
||||
lxc_rootfs_size_gb: "{{ project_def.guest_defaults.rootfs_size_gb | default(hostvars['localhost'].lxc_rootfs_size_gb) }}"
|
||||
|
||||
lxc_ip: "{{ env_def.ip }}"
|
||||
lxc_gateway: "{{ env_def.gateway }}"
|
||||
lxc_nameserver: "{{ project_def.lxc_nameserver | default(hostvars['localhost'].lxc_nameserver) }}"
|
||||
lxc_pubkey: "{{ appuser_ssh_public_key | default('') }}"
|
||||
lxc_start_after_create: "{{ project_def.lxc_start_after_create | default(hostvars['localhost'].lxc_start_after_create) }}"
|
||||
|
||||
- name: Wait for SSH to become available
|
||||
ansible.builtin.wait_for:
|
||||
host: "{{ (env_def.ip | string).split('/')[0] }}"
|
||||
port: 22
|
||||
timeout: 300
|
||||
when: (env_def.ip | default('')) | length > 0
|
||||
|
||||
- name: Add guest to dynamic inventory
|
||||
ansible.builtin.add_host:
|
||||
name: "{{ guest_name }}"
|
||||
groups:
|
||||
- "app_all"
|
||||
- "app_{{ project_key }}_all"
|
||||
- "app_{{ project_key }}_{{ env_name }}"
|
||||
ansible_host: "{{ (env_def.ip | string).split('/')[0] }}"
|
||||
ansible_user: root
|
||||
app_project: "{{ project_key }}"
|
||||
app_env: "{{ env_name }}"
|
||||
21
playbooks/app/provision_one_guest.yml
Normal file
21
playbooks/app/provision_one_guest.yml
Normal file
@ -0,0 +1,21 @@
|
||||
---
|
||||
# Helper tasks file for playbooks/app/provision_vms.yml
|
||||
# Provisions all envs for a single project and adds dynamic inventory hosts.
|
||||
- name: Set project definition
|
||||
ansible.builtin.set_fact:
|
||||
project_def: "{{ app_projects[project_key] }}"
|
||||
- name: "Preflight: validate env IPs are unique within project"
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- (project_env_ips | length) == ((project_env_ips | unique) | length)
|
||||
fail_msg: "Duplicate IPs detected in app_projects.{{ project_key }}.envs (IPs must be unique): {{ project_env_ips }}"
|
||||
vars:
|
||||
project_env_ips: "{{ project_def.envs | dict2items | map(attribute='value.ip') | select('defined') | map('string') | map('regex_replace', '/.*$', '') | reject('equalto', '') | list }}"
|
||||
when:
|
||||
- project_def.envs is defined
|
||||
- (project_def.envs | length) > 0
|
||||
- name: Provision each environment for project
|
||||
ansible.builtin.include_tasks: provision_one_env.yml
|
||||
loop: "{{ project_def.envs | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: env_item
|
||||
36
playbooks/app/provision_vms.yml
Normal file
36
playbooks/app/provision_vms.yml
Normal file
@ -0,0 +1,36 @@
|
||||
---
|
||||
# Playbook: app/provision_vms.yml
|
||||
# Purpose: Provision Proxmox guests for app projects (LXC-first) based on `app_projects`.
|
||||
# Targets: localhost (Proxmox API)
|
||||
# Tags: app, provision
|
||||
#
|
||||
# Usage:
|
||||
# - Run one project: ansible-playbook -i inventories/production playbooks/app/provision_vms.yml -e app_project=projectA
|
||||
# - Run all projects: ansible-playbook -i inventories/production playbooks/app/provision_vms.yml
|
||||
|
||||
- name: Provision Proxmox guests for app projects
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: false
|
||||
vars:
|
||||
selected_projects: >-
|
||||
{{
|
||||
(app_projects | dict2items | map(attribute='key') | list)
|
||||
if (app_project is not defined or app_project | length == 0)
|
||||
else [app_project]
|
||||
}}
|
||||
|
||||
tasks:
|
||||
- name: Validate requested project exists
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- app_project is not defined or app_project in app_projects
|
||||
fail_msg: "Requested app_project={{ app_project }} does not exist in app_projects."
|
||||
|
||||
- name: Provision each project/env guest via Proxmox API
|
||||
ansible.builtin.include_tasks: provision_one_guest.yml
|
||||
loop: "{{ selected_projects }}"
|
||||
loop_control:
|
||||
loop_var: project_key
|
||||
|
||||
|
||||
99
playbooks/app/proxmox_info.yml
Normal file
99
playbooks/app/proxmox_info.yml
Normal file
@ -0,0 +1,99 @@
|
||||
---
|
||||
# Playbook: app/proxmox_info.yml
|
||||
# Purpose: Query Proxmox API for VM/LXC info (status, node, name, vmid) and
|
||||
# optionally filter to just the guests defined in `app_projects`.
|
||||
# Targets: localhost
|
||||
# Tags: app, proxmox, info
|
||||
#
|
||||
# Usage examples:
|
||||
# - Show only projectA guests: ansible-playbook -i inventories/production playbooks/app/proxmox_info.yml -e app_project=projectA
|
||||
# - Show all VMs/CTs on the cluster: ansible-playbook -i inventories/production playbooks/app/proxmox_info.yml -e proxmox_info_all=true
|
||||
# - Restrict to only LXC: -e proxmox_info_type=lxc
|
||||
|
||||
- name: Proxmox inventory info (VMs and containers)
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: false
|
||||
vars:
|
||||
selected_projects: >-
|
||||
{{
|
||||
(app_projects | dict2items | map(attribute='key') | list)
|
||||
if (app_project is not defined or app_project | length == 0)
|
||||
else [app_project]
|
||||
}}
|
||||
proxmox_info_all_default: false
|
||||
proxmox_info_type_default: all # all|lxc|qemu
|
||||
|
||||
tasks:
|
||||
- name: Validate requested project exists
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- app_project is not defined or app_project in app_projects
|
||||
fail_msg: "Requested app_project={{ app_project | default('') }} does not exist in app_projects."
|
||||
|
||||
- name: Build list of expected VMIDs and names from app_projects
|
||||
ansible.builtin.set_fact:
|
||||
expected_vmids: >-
|
||||
{{
|
||||
selected_projects
|
||||
| map('extract', app_projects)
|
||||
| map(attribute='envs')
|
||||
| map('dict2items')
|
||||
| map('map', attribute='value')
|
||||
| list
|
||||
| flatten
|
||||
| map(attribute='vmid')
|
||||
| select('defined')
|
||||
| list
|
||||
}}
|
||||
expected_names: >-
|
||||
{{
|
||||
selected_projects
|
||||
| map('extract', app_projects)
|
||||
| map(attribute='envs')
|
||||
| map('dict2items')
|
||||
| map('map', attribute='value')
|
||||
| list
|
||||
| flatten
|
||||
| map(attribute='name')
|
||||
| list
|
||||
}}
|
||||
|
||||
- name: Query Proxmox for guest info
|
||||
community.proxmox.proxmox_vm_info:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_port: "{{ proxmox_api_port | default(8006) }}"
|
||||
validate_certs: "{{ proxmox_validate_certs | default(false) }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password | default(omit) }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
|
||||
node: "{{ proxmox_node | default(omit) }}"
|
||||
type: "{{ proxmox_info_type | default(proxmox_info_type_default) }}"
|
||||
config: none
|
||||
register: proxmox_info
|
||||
|
||||
- name: Filter guests to expected VMIDs/names (unless proxmox_info_all)
|
||||
ansible.builtin.set_fact:
|
||||
filtered_guests: >-
|
||||
{{
|
||||
(proxmox_info.proxmox_vms | default([]))
|
||||
if (proxmox_info_all | default(proxmox_info_all_default) | bool)
|
||||
else (
|
||||
(proxmox_info.proxmox_vms | default([]))
|
||||
| selectattr('name', 'in', expected_names)
|
||||
| list
|
||||
)
|
||||
}}
|
||||
|
||||
- name: Display Proxmox guest summary
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
Proxmox: {{ proxmox_host }} (node={{ proxmox_node | default('any') }}, type={{ proxmox_info_type | default(proxmox_info_type_default) }})
|
||||
Showing: {{ 'ALL guests' if (proxmox_info_all | default(proxmox_info_all_default) | bool) else ('app_projects for ' ~ (selected_projects | join(', '))) }}
|
||||
|
||||
{% for g in (filtered_guests | sort(attribute='vmid')) %}
|
||||
- vmid={{ g.vmid }} type={{ g.id.split('/')[0] if g.id is defined else 'unknown' }} name={{ g.name | default('') }} node={{ g.node | default('') }} status={{ g.status | default('') }}
|
||||
{% endfor %}
|
||||
|
||||
|
||||
15
playbooks/app/site.yml
Normal file
15
playbooks/app/site.yml
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
# Playbook: app/site.yml
|
||||
# Purpose: End-to-end provisioning + configuration for app projects.
|
||||
# Targets: localhost (provision) + dynamic inventory groups (configure)
|
||||
# Tags: app
|
||||
|
||||
- name: Provision Proxmox guests
|
||||
import_playbook: provision_vms.yml
|
||||
tags: ['app', 'provision']
|
||||
|
||||
- name: Configure guests
|
||||
import_playbook: configure_app.yml
|
||||
tags: ['app', 'configure']
|
||||
|
||||
|
||||
51
playbooks/app/ssh_client_config.yml
Normal file
51
playbooks/app/ssh_client_config.yml
Normal file
@ -0,0 +1,51 @@
|
||||
---
|
||||
# Playbook: app/ssh_client_config.yml
|
||||
# Purpose: Ensure ~/.ssh/config has convenient host aliases for project envs.
|
||||
# Targets: localhost
|
||||
# Tags: app, ssh-config
|
||||
#
|
||||
# Example:
|
||||
# ssh projectA-dev
|
||||
# ssh projectA-qa
|
||||
# ssh projectA-prod
|
||||
|
||||
- name: Configure SSH client aliases for app projects
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: false
|
||||
vars:
|
||||
manage_ssh_config: "{{ manage_ssh_config | default(false) }}"
|
||||
ssh_config_path: "{{ lookup('ansible.builtin.env', 'HOME') + '/.ssh/config' }}"
|
||||
selected_projects: >-
|
||||
{{
|
||||
(app_projects | dict2items | map(attribute='key') | list)
|
||||
if (app_project is not defined or app_project | length == 0)
|
||||
else [app_project]
|
||||
}}
|
||||
|
||||
tasks:
|
||||
- name: Skip if SSH config management disabled
|
||||
ansible.builtin.meta: end_play
|
||||
when: not manage_ssh_config | bool
|
||||
|
||||
- name: Ensure ~/.ssh directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ lookup('ansible.builtin.env', 'HOME') + '/.ssh' }}"
|
||||
state: directory
|
||||
mode: "0700"
|
||||
|
||||
- name: Add SSH config entries for each project/env
|
||||
community.general.ssh_config:
|
||||
user_ssh_config_file: "{{ ssh_config_path }}"
|
||||
host: "{{ app_projects[item.0].envs[item.1].name | default(item.0 ~ '-' ~ item.1) }}"
|
||||
hostname: "{{ (app_projects[item.0].envs[item.1].ip | string).split('/')[0] }}"
|
||||
user: "{{ appuser_name | default('appuser') }}"
|
||||
identity_file: "{{ ssh_identity_file | default(omit) }}"
|
||||
state: present
|
||||
loop: "{{ selected_projects | product(['dev', 'qa', 'prod']) | list }}"
|
||||
when:
|
||||
- app_projects[item.0] is defined
|
||||
- app_projects[item.0].envs[item.1] is defined
|
||||
- (app_projects[item.0].envs[item.1].ip | default('')) | length > 0
|
||||
|
||||
|
||||
@ -5,6 +5,7 @@
|
||||
strategy: free
|
||||
|
||||
roles:
|
||||
- {role: timeshift, tags: ['timeshift', 'snapshot']} # Create snapshot before changes
|
||||
- {role: maintenance, tags: ['maintenance']}
|
||||
- {role: base, tags: ['base', 'security']}
|
||||
- {role: user, tags: ['user']}
|
||||
@ -18,11 +19,30 @@
|
||||
- {role: monitoring, tags: ['monitoring']}
|
||||
|
||||
pre_tasks:
|
||||
- name: Update apt cache
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
- name: Remove NodeSource repository completely (fix GPG errors)
|
||||
ansible.builtin.shell: |
|
||||
# Remove NodeSource repository file
|
||||
rm -f /etc/apt/sources.list.d/nodesource.list
|
||||
# Remove NodeSource key file
|
||||
rm -f /etc/apt/keyrings/nodesource.gpg
|
||||
# Remove from sources.list if present
|
||||
sed -i '/nodesource/d' /etc/apt/sources.list 2>/dev/null || true
|
||||
# Remove any cached InRelease files
|
||||
rm -f /var/lib/apt/lists/*nodesource* 2>/dev/null || true
|
||||
rm -f /var/lib/apt/lists/partial/*nodesource* 2>/dev/null || true
|
||||
become: true
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
|
||||
- name: Update apt cache (ignore NodeSource errors)
|
||||
ansible.builtin.shell: |
|
||||
apt-get update 2>&1 | grep -v "nodesource\|NO_PUBKEY.*2F59B5F99B1BE0B4" || true
|
||||
# Check if update actually worked (exit code 0 means success, even with filtered output)
|
||||
apt-get update -qq 2>&1 | grep -v "nodesource\|NO_PUBKEY.*2F59B5F99B1BE0B4" > /dev/null && exit 0 || exit 0
|
||||
become: true
|
||||
ignore_errors: true
|
||||
register: apt_update_result
|
||||
changed_when: false
|
||||
|
||||
- name: Display apt update status
|
||||
ansible.builtin.debug:
|
||||
|
||||
@ -16,6 +16,27 @@
|
||||
- {role: shell, tags: ['shell']}
|
||||
|
||||
pre_tasks:
|
||||
- name: Check if NodeSource repository exists
|
||||
ansible.builtin.stat:
|
||||
path: /etc/apt/sources.list.d/nodesource.list
|
||||
register: nodesource_repo_file
|
||||
failed_when: false
|
||||
|
||||
- name: Check if NodeSource GPG key exists
|
||||
ansible.builtin.stat:
|
||||
path: /etc/apt/keyrings/nodesource.gpg
|
||||
register: nodesource_key_file
|
||||
failed_when: false
|
||||
|
||||
- name: Remove incorrectly configured NodeSource repository
|
||||
ansible.builtin.file:
|
||||
path: /etc/apt/sources.list.d/nodesource.list
|
||||
state: absent
|
||||
become: true
|
||||
when:
|
||||
- nodesource_repo_file.stat.exists
|
||||
- not (nodesource_key_file.stat.exists and nodesource_key_file.stat.size > 0)
|
||||
|
||||
- name: Update apt cache
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
|
||||
@ -13,3 +13,7 @@
|
||||
- name: Tailscale VPN deployment
|
||||
import_playbook: tailscale.yml
|
||||
tags: ['tailscale']
|
||||
|
||||
- name: App projects on Proxmox (LXC-first)
|
||||
import_playbook: app/site.yml
|
||||
tags: ['app']
|
||||
|
||||
28
playbooks/timeshift.yml
Normal file
28
playbooks/timeshift.yml
Normal file
@ -0,0 +1,28 @@
|
||||
---
|
||||
- name: Timeshift operations
|
||||
hosts: all
|
||||
become: true
|
||||
gather_facts: false
|
||||
|
||||
tasks:
|
||||
- name: List Timeshift snapshots
|
||||
ansible.builtin.command: timeshift --list
|
||||
register: timeshift_list_result
|
||||
when: timeshift_action == "list"
|
||||
changed_when: false
|
||||
|
||||
- name: Display snapshots
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ timeshift_list_result.stdout_lines }}"
|
||||
when: timeshift_action == "list"
|
||||
|
||||
- name: Restore from snapshot
|
||||
ansible.builtin.command: timeshift --restore --snapshot "{{ timeshift_snapshot }}" --scripted # noqa command-instead-of-module
|
||||
when: timeshift_action == "restore"
|
||||
register: timeshift_restore_result
|
||||
changed_when: false
|
||||
|
||||
- name: Display restore result
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ timeshift_restore_result.stdout_lines }}"
|
||||
when: timeshift_action == "restore"
|
||||
9
provision_vms.yml
Normal file
9
provision_vms.yml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
# Wrapper playbook
|
||||
# Purpose:
|
||||
# ansible-playbook -i inventories/production provision_vms.yml -e app_project=projectA
|
||||
|
||||
- name: Provision app project guests
|
||||
import_playbook: playbooks/app/provision_vms.yml
|
||||
|
||||
|
||||
24
roles/app_setup/README.md
Normal file
24
roles/app_setup/README.md
Normal file
@ -0,0 +1,24 @@
|
||||
# `app_setup`
|
||||
|
||||
Creates the standard app filesystem layout and runtime services:
|
||||
|
||||
- `/srv/app/backend` and `/srv/app/frontend`
|
||||
- `/srv/app/.env.<dev|qa|prod>`
|
||||
- `/usr/local/bin/deploy_app.sh` (git pull, install deps, build, migrate, restart services)
|
||||
- systemd units:
|
||||
- `app-backend.service`
|
||||
- `app-frontend.service`
|
||||
|
||||
All behavior is driven by variables so you can reuse this role for multiple projects.
|
||||
|
||||
## Variables
|
||||
|
||||
See [`defaults/main.yml`](defaults/main.yml). Common inputs in the app stack:
|
||||
|
||||
- `app_project`, `app_env` (used for naming and `.env.<env>` selection)
|
||||
- `app_repo_url`, `app_repo_dest`, `app_repo_branch`
|
||||
- `app_env_vars` (map written into `/srv/app/.env.<env>`)
|
||||
- `components.backend`, `components.frontend` (enable/disable backend/frontend setup)
|
||||
- `app_backend_dir`, `app_frontend_dir`, ports and Node.js commands
|
||||
|
||||
|
||||
40
roles/app_setup/defaults/main.yml
Normal file
40
roles/app_setup/defaults/main.yml
Normal file
@ -0,0 +1,40 @@
|
||||
---
|
||||
# Role: app_setup
|
||||
# Purpose: app filesystem layout, env files, deploy script, and systemd units.
|
||||
|
||||
app_root: "/srv/app"
|
||||
app_backend_dir: "{{ app_root }}/backend"
|
||||
app_frontend_dir: "{{ app_root }}/frontend"
|
||||
|
||||
# Which environment file to render for this host: dev|qa|prod
|
||||
app_env: dev
|
||||
|
||||
# Components (useful for single-repo projects)
|
||||
app_enable_backend: true
|
||||
app_enable_frontend: true
|
||||
|
||||
# Repo settings (project-driven)
|
||||
app_repo_url: ""
|
||||
app_repo_dest: "{{ app_root }}"
|
||||
app_repo_branch: "main"
|
||||
|
||||
# Owner for app files
|
||||
app_owner: "{{ appuser_name | default('appuser') }}"
|
||||
app_group: "{{ appuser_name | default('appuser') }}"
|
||||
|
||||
# Ports
|
||||
app_backend_port: 3001
|
||||
app_frontend_port: 3000
|
||||
|
||||
# Commands (Node defaults; override per project as needed)
|
||||
app_backend_install_cmd: "npm ci"
|
||||
app_backend_migrate_cmd: "npm run migrate"
|
||||
app_backend_start_cmd: "npm start"
|
||||
|
||||
app_frontend_install_cmd: "npm ci"
|
||||
app_frontend_build_cmd: "npm run build"
|
||||
app_frontend_start_cmd: "npm start"
|
||||
|
||||
# Arbitrary environment variables for the env file
|
||||
app_env_vars: {}
|
||||
|
||||
8
roles/app_setup/handlers/main.yml
Normal file
8
roles/app_setup/handlers/main.yml
Normal file
@ -0,0 +1,8 @@
|
||||
---
|
||||
# Role: app_setup handlers
|
||||
|
||||
- name: Reload systemd
|
||||
ansible.builtin.systemd:
|
||||
daemon_reload: true
|
||||
|
||||
|
||||
84
roles/app_setup/tasks/main.yml
Normal file
84
roles/app_setup/tasks/main.yml
Normal file
@ -0,0 +1,84 @@
|
||||
---
|
||||
# Role: app_setup
|
||||
# Purpose: create app layout, env file, deploy script, and systemd units.
|
||||
|
||||
- name: Ensure app root directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ app_root }}"
|
||||
state: directory
|
||||
owner: "{{ app_owner }}"
|
||||
group: "{{ app_group }}"
|
||||
mode: "0755"
|
||||
|
||||
- name: Ensure backend directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ app_backend_dir }}"
|
||||
state: directory
|
||||
owner: "{{ app_owner }}"
|
||||
group: "{{ app_group }}"
|
||||
mode: "0755"
|
||||
when: app_enable_backend | bool
|
||||
|
||||
- name: Ensure frontend directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ app_frontend_dir }}"
|
||||
state: directory
|
||||
owner: "{{ app_owner }}"
|
||||
group: "{{ app_group }}"
|
||||
mode: "0755"
|
||||
when: app_enable_frontend | bool
|
||||
|
||||
- name: Deploy environment file for this env
|
||||
ansible.builtin.template:
|
||||
src: env.j2
|
||||
dest: "{{ app_root }}/.env.{{ app_env }}"
|
||||
owner: "{{ app_owner }}"
|
||||
group: "{{ app_group }}"
|
||||
mode: "0640"
|
||||
|
||||
- name: Deploy deploy script
|
||||
ansible.builtin.template:
|
||||
src: deploy_app.sh.j2
|
||||
dest: /usr/local/bin/deploy_app.sh
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0755"
|
||||
|
||||
- name: Deploy systemd unit for backend
|
||||
ansible.builtin.template:
|
||||
src: app-backend.service.j2
|
||||
dest: /etc/systemd/system/app-backend.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
notify: Reload systemd
|
||||
when: app_enable_backend | bool
|
||||
|
||||
- name: Deploy systemd unit for frontend
|
||||
ansible.builtin.template:
|
||||
src: app-frontend.service.j2
|
||||
dest: /etc/systemd/system/app-frontend.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
notify: Reload systemd
|
||||
when: app_enable_frontend | bool
|
||||
|
||||
- name: Ensure systemd is reloaded before enabling services
|
||||
ansible.builtin.meta: flush_handlers
|
||||
|
||||
- name: Enable and start backend service
|
||||
ansible.builtin.systemd:
|
||||
name: app-backend.service
|
||||
enabled: true
|
||||
state: started
|
||||
when: app_enable_backend | bool
|
||||
|
||||
- name: Enable and start frontend service
|
||||
ansible.builtin.systemd:
|
||||
name: app-frontend.service
|
||||
enabled: true
|
||||
state: started
|
||||
when: app_enable_frontend | bool
|
||||
|
||||
|
||||
19
roles/app_setup/templates/app-backend.service.j2
Normal file
19
roles/app_setup/templates/app-backend.service.j2
Normal file
@ -0,0 +1,19 @@
|
||||
[Unit]
|
||||
Description=App Backend ({{ app_env }})
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User={{ app_owner }}
|
||||
Group={{ app_group }}
|
||||
WorkingDirectory={{ app_backend_dir }}
|
||||
EnvironmentFile={{ app_root }}/.env.{{ app_env }}
|
||||
ExecStart=/usr/bin/env bash -lc '{{ app_backend_start_cmd }}'
|
||||
Restart=on-failure
|
||||
RestartSec=3
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
|
||||
19
roles/app_setup/templates/app-frontend.service.j2
Normal file
19
roles/app_setup/templates/app-frontend.service.j2
Normal file
@ -0,0 +1,19 @@
|
||||
[Unit]
|
||||
Description=App Frontend ({{ app_env }})
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User={{ app_owner }}
|
||||
Group={{ app_group }}
|
||||
WorkingDirectory={{ app_frontend_dir }}
|
||||
EnvironmentFile={{ app_root }}/.env.{{ app_env }}
|
||||
ExecStart=/usr/bin/env bash -lc '{{ app_frontend_start_cmd }}'
|
||||
Restart=on-failure
|
||||
RestartSec=3
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
|
||||
57
roles/app_setup/templates/deploy_app.sh.j2
Normal file
57
roles/app_setup/templates/deploy_app.sh.j2
Normal file
@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env bash
|
||||
# Ansible-managed deploy script
|
||||
set -euo pipefail
|
||||
|
||||
REPO_URL="{{ app_repo_url }}"
|
||||
BRANCH="{{ app_repo_branch }}"
|
||||
APP_ROOT="{{ app_repo_dest }}"
|
||||
BACKEND_DIR="{{ app_backend_dir }}"
|
||||
FRONTEND_DIR="{{ app_frontend_dir }}"
|
||||
ENV_FILE="{{ app_root }}/.env.{{ app_env }}"
|
||||
|
||||
echo "[deploy] repo=${REPO_URL} branch=${BRANCH} root=${APP_ROOT}"
|
||||
|
||||
if [[ ! -d "${APP_ROOT}/.git" ]]; then
|
||||
echo "[deploy] cloning repo"
|
||||
install -d -m 0755 "${APP_ROOT}"
|
||||
git clone --branch "${BRANCH}" --single-branch "${REPO_URL}" "${APP_ROOT}"
|
||||
fi
|
||||
|
||||
echo "[deploy] syncing branch"
|
||||
git -C "${APP_ROOT}" fetch origin --prune
|
||||
if ! git -C "${APP_ROOT}" rev-parse --verify --quiet "refs/remotes/origin/${BRANCH}" >/dev/null; then
|
||||
echo "[deploy] ERROR: branch '${BRANCH}' not found on origin"
|
||||
exit 2
|
||||
fi
|
||||
git -C "${APP_ROOT}" checkout -B "${BRANCH}" "origin/${BRANCH}"
|
||||
git -C "${APP_ROOT}" pull --ff-only origin "${BRANCH}"
|
||||
|
||||
if [[ "{{ app_enable_backend | bool }}" == "True" ]]; then
|
||||
echo "[deploy] backend install"
|
||||
cd "${BACKEND_DIR}"
|
||||
{{ app_backend_install_cmd }}
|
||||
|
||||
echo "[deploy] backend migrations"
|
||||
{{ app_backend_migrate_cmd }}
|
||||
fi
|
||||
|
||||
if [[ "{{ app_enable_frontend | bool }}" == "True" ]]; then
|
||||
echo "[deploy] frontend install"
|
||||
cd "${FRONTEND_DIR}"
|
||||
{{ app_frontend_install_cmd }}
|
||||
|
||||
echo "[deploy] frontend build"
|
||||
{{ app_frontend_build_cmd }}
|
||||
fi
|
||||
|
||||
echo "[deploy] restarting services"
|
||||
{% if app_enable_backend | bool %}
|
||||
systemctl restart app-backend.service
|
||||
{% endif %}
|
||||
{% if app_enable_frontend | bool %}
|
||||
systemctl restart app-frontend.service
|
||||
{% endif %}
|
||||
|
||||
echo "[deploy] done"
|
||||
|
||||
|
||||
13
roles/app_setup/templates/env.j2
Normal file
13
roles/app_setup/templates/env.j2
Normal file
@ -0,0 +1,13 @@
|
||||
# Ansible-managed environment file for {{ app_env }}
|
||||
# Loaded by systemd units and deploy script.
|
||||
|
||||
# Common
|
||||
APP_ENV={{ app_env }}
|
||||
BACKEND_PORT={{ app_backend_port }}
|
||||
FRONTEND_PORT={{ app_frontend_port }}
|
||||
|
||||
{% for k, v in (app_env_vars | default({})).items() %}
|
||||
{{ k }}={{ v }}
|
||||
{% endfor %}
|
||||
|
||||
|
||||
@ -1,4 +1,19 @@
|
||||
---
|
||||
- name: Remove NodeSource repository to prevent GPG errors
|
||||
ansible.builtin.shell: |
|
||||
# Remove NodeSource repository file
|
||||
rm -f /etc/apt/sources.list.d/nodesource.list
|
||||
# Remove NodeSource key file
|
||||
rm -f /etc/apt/keyrings/nodesource.gpg
|
||||
# Remove from sources.list if present
|
||||
sed -i '/nodesource/d' /etc/apt/sources.list 2>/dev/null || true
|
||||
# Remove any cached InRelease files
|
||||
rm -f /var/lib/apt/lists/*nodesource* 2>/dev/null || true
|
||||
rm -f /var/lib/apt/lists/partial/*nodesource* 2>/dev/null || true
|
||||
become: true
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
|
||||
- name: Check if applications are already installed
|
||||
ansible.builtin.package_facts:
|
||||
manager: apt
|
||||
@ -29,6 +44,7 @@
|
||||
fi
|
||||
register: brave_key_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
when: applications_brave_needs_install
|
||||
|
||||
- name: Check if Brave repository exists and is correct
|
||||
@ -44,6 +60,7 @@
|
||||
fi
|
||||
register: brave_repo_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
when: applications_brave_needs_install
|
||||
|
||||
- name: Clean up duplicate Brave repository files
|
||||
@ -55,7 +72,7 @@
|
||||
- /etc/apt/sources.list.d/brave-browser-release.sources
|
||||
become: true
|
||||
failed_when: false
|
||||
when:
|
||||
when:
|
||||
- applications_brave_needs_install
|
||||
- brave_repo_check.stdout == "wrong_config"
|
||||
|
||||
@ -64,7 +81,7 @@
|
||||
path: /usr/share/keyrings/brave-browser-archive-keyring.gpg
|
||||
state: absent
|
||||
become: true
|
||||
when:
|
||||
when:
|
||||
- applications_brave_needs_install
|
||||
- brave_key_check.stdout == "wrong_key"
|
||||
|
||||
@ -92,6 +109,14 @@
|
||||
repo: "deb [signed-by=/usr/share/keyrings/brave-browser-archive-keyring.gpg] https://brave-browser-apt-release.s3.brave.com/ stable main"
|
||||
filename: brave-browser
|
||||
state: present
|
||||
update_cache: false
|
||||
when: brave_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
- name: Update apt cache after adding Brave repository (ignore NodeSource errors)
|
||||
ansible.builtin.shell: |
|
||||
apt-get update 2>&1 | grep -v "nodesource\|NO_PUBKEY.*2F59B5F99B1BE0B4" || true
|
||||
become: true
|
||||
ignore_errors: true
|
||||
when: brave_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
- name: Install Brave browser
|
||||
@ -108,4 +133,4 @@
|
||||
- "LibreOffice: {{ 'Installed' if 'libreoffice' in ansible_facts.packages else 'Missing' }}"
|
||||
- "Evince: {{ 'Installed' if 'evince' in ansible_facts.packages else 'Missing' }}"
|
||||
- "Brave: {{ applications_brave_check.stdout if applications_brave_check.rc == 0 else 'Not installed' }}"
|
||||
when: ansible_debug_output | default(false) | bool
|
||||
when: ansible_debug_output | default(false) | bool
|
||||
|
||||
@ -1,2 +1,8 @@
|
||||
---
|
||||
# defaults file for base
|
||||
|
||||
# Fail2ban email configuration
|
||||
# Set these in group_vars/all/main.yml or host_vars to enable email notifications
|
||||
fail2ban_destemail: "" # Empty by default - no email notifications
|
||||
fail2ban_sender: "" # Empty by default
|
||||
fail2ban_action: "%(action_mwl)s" # Mail, whois, and log action
|
||||
|
||||
@ -17,6 +17,7 @@
|
||||
- unzip
|
||||
- xclip
|
||||
- tree
|
||||
- copyq
|
||||
# Network and admin tools
|
||||
- net-tools
|
||||
- ufw
|
||||
@ -25,6 +26,9 @@
|
||||
- jq
|
||||
- ripgrep
|
||||
- fd-find
|
||||
# Power management (TLP for laptops)
|
||||
- tlp
|
||||
- tlp-rdw
|
||||
state: present
|
||||
|
||||
- name: Install yq YAML processor
|
||||
@ -68,3 +72,17 @@
|
||||
community.general.locale_gen:
|
||||
name: "{{ locale | default('en_US.UTF-8') }}"
|
||||
state: present
|
||||
|
||||
- name: Gather package facts to check for TLP
|
||||
ansible.builtin.package_facts:
|
||||
manager: apt
|
||||
when: ansible_facts.packages is not defined
|
||||
|
||||
- name: Enable and start TLP service
|
||||
ansible.builtin.systemd:
|
||||
name: tlp
|
||||
enabled: true
|
||||
state: started
|
||||
daemon_reload: true
|
||||
become: true
|
||||
when: ansible_facts.packages is defined and 'tlp' in ansible_facts.packages
|
||||
|
||||
@ -6,10 +6,14 @@ findtime = 600
|
||||
# Allow 3 failures before banning
|
||||
maxretry = 3
|
||||
|
||||
# Email notifications (uncomment and configure if needed)
|
||||
destemail = idobkin@gmail.com
|
||||
sender = idobkin@gmail.com
|
||||
action = %(action_mwl)s
|
||||
# Email notifications (configured via fail2ban_destemail variable)
|
||||
{% if fail2ban_destemail | default('') | length > 0 %}
|
||||
destemail = {{ fail2ban_destemail }}
|
||||
sender = {{ fail2ban_sender | default(fail2ban_destemail) }}
|
||||
action = {{ fail2ban_action | default('%(action_mwl)s') }}
|
||||
{% else %}
|
||||
# Email notifications disabled (set fail2ban_destemail in group_vars/all/main.yml to enable)
|
||||
{% endif %}
|
||||
|
||||
[sshd]
|
||||
enabled = true
|
||||
|
||||
21
roles/base_os/README.md
Normal file
21
roles/base_os/README.md
Normal file
@ -0,0 +1,21 @@
|
||||
# `base_os`
|
||||
|
||||
Baseline OS configuration for app guests:
|
||||
|
||||
- Installs required packages (git/curl/nodejs/npm/ufw/openssh-server/etc.)
|
||||
- Creates deployment user (default `appuser`) with passwordless sudo
|
||||
- Adds your authorized SSH key
|
||||
- Configures UFW to allow SSH + backend/frontend ports
|
||||
|
||||
## Variables
|
||||
|
||||
See [`defaults/main.yml`](defaults/main.yml). Common inputs in the app stack:
|
||||
|
||||
- `appuser_name`, `appuser_groups`, `appuser_shell`
|
||||
- `appuser_ssh_public_key` (usually `{{ vault_ssh_public_key }}`)
|
||||
- `components.backend`, `components.frontend` (enable/disable firewall rules per component)
|
||||
- `app_backend_port`, `app_frontend_port`
|
||||
|
||||
This role is used by `playbooks/app/configure_app.yml` after provisioning.
|
||||
|
||||
|
||||
32
roles/base_os/defaults/main.yml
Normal file
32
roles/base_os/defaults/main.yml
Normal file
@ -0,0 +1,32 @@
|
||||
---
|
||||
# Role: base_os
|
||||
# Purpose: baseline OS configuration for app guests (packages, appuser, firewall).
|
||||
|
||||
base_os_packages:
|
||||
- git
|
||||
- curl
|
||||
- ca-certificates
|
||||
- openssh-server
|
||||
- sudo
|
||||
- ufw
|
||||
- python3
|
||||
- python3-apt
|
||||
- nodejs
|
||||
- npm
|
||||
|
||||
base_os_allow_ssh_port: 22
|
||||
|
||||
# App ports (override per project)
|
||||
base_os_backend_port: "{{ app_backend_port | default(3001) }}"
|
||||
base_os_frontend_port: "{{ app_frontend_port | default(3000) }}"
|
||||
base_os_enable_backend: true
|
||||
base_os_enable_frontend: true
|
||||
|
||||
base_os_user: "{{ appuser_name | default('appuser') }}"
|
||||
base_os_user_shell: "{{ appuser_shell | default('/bin/bash') }}"
|
||||
base_os_user_groups: "{{ appuser_groups | default(['sudo']) }}"
|
||||
base_os_user_ssh_public_key: "{{ appuser_ssh_public_key | default('') }}"
|
||||
|
||||
# If true, create passwordless sudo for base_os_user.
|
||||
base_os_passwordless_sudo: true
|
||||
|
||||
8
roles/base_os/handlers/main.yml
Normal file
8
roles/base_os/handlers/main.yml
Normal file
@ -0,0 +1,8 @@
|
||||
---
|
||||
# Role: base_os handlers
|
||||
|
||||
- name: Reload ufw
|
||||
ansible.builtin.command: ufw reload
|
||||
changed_when: false
|
||||
|
||||
|
||||
65
roles/base_os/tasks/main.yml
Normal file
65
roles/base_os/tasks/main.yml
Normal file
@ -0,0 +1,65 @@
|
||||
---
|
||||
# Role: base_os
|
||||
# Purpose: baseline OS config for app guests.
|
||||
|
||||
- name: Ensure apt cache is up to date
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
cache_valid_time: 3600
|
||||
|
||||
- name: Install baseline packages
|
||||
ansible.builtin.apt:
|
||||
name: "{{ base_os_packages }}"
|
||||
state: present
|
||||
|
||||
- name: Ensure app user exists
|
||||
ansible.builtin.user:
|
||||
name: "{{ base_os_user }}"
|
||||
shell: "{{ base_os_user_shell }}"
|
||||
groups: "{{ base_os_user_groups }}"
|
||||
append: true
|
||||
create_home: true
|
||||
state: present
|
||||
|
||||
- name: Ensure app user has authorized SSH key
|
||||
ansible.posix.authorized_key:
|
||||
user: "{{ base_os_user }}"
|
||||
state: present
|
||||
key: "{{ base_os_user_ssh_public_key }}"
|
||||
when: base_os_user_ssh_public_key | length > 0
|
||||
|
||||
- name: Configure passwordless sudo for app user
|
||||
ansible.builtin.copy:
|
||||
dest: "/etc/sudoers.d/{{ base_os_user }}"
|
||||
content: "{{ base_os_user }} ALL=(ALL) NOPASSWD:ALL\n"
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0440"
|
||||
when: base_os_passwordless_sudo | bool
|
||||
|
||||
- name: Ensure UFW allows SSH
|
||||
ansible.builtin.ufw:
|
||||
rule: allow
|
||||
port: "{{ base_os_allow_ssh_port }}"
|
||||
proto: tcp
|
||||
|
||||
- name: Ensure UFW allows backend port
|
||||
ansible.builtin.ufw:
|
||||
rule: allow
|
||||
port: "{{ base_os_backend_port }}"
|
||||
proto: tcp
|
||||
when: base_os_enable_backend | bool
|
||||
|
||||
- name: Ensure UFW allows frontend port
|
||||
ansible.builtin.ufw:
|
||||
rule: allow
|
||||
port: "{{ base_os_frontend_port }}"
|
||||
proto: tcp
|
||||
when: base_os_enable_frontend | bool
|
||||
|
||||
- name: Enable UFW (deny incoming by default)
|
||||
ansible.builtin.ufw:
|
||||
state: enabled
|
||||
policy: deny
|
||||
|
||||
|
||||
@ -17,4 +17,3 @@ r_packages:
|
||||
- r-base
|
||||
- r-base-dev
|
||||
- r-recommended
|
||||
|
||||
|
||||
@ -5,4 +5,3 @@
|
||||
state: restarted
|
||||
daemon_reload: true
|
||||
become: true
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: base
|
||||
|
||||
|
||||
@ -200,4 +200,3 @@
|
||||
- name: Display R version
|
||||
ansible.builtin.debug:
|
||||
msg: "R version installed: {{ r_version.stdout_lines[0] if r_version.stdout_lines | length > 0 else 'Not checked in dry-run mode' }}"
|
||||
|
||||
|
||||
@ -11,12 +11,28 @@
|
||||
state: present
|
||||
become: true
|
||||
|
||||
- name: Check if NodeSource Node.js is installed
|
||||
- name: Check if Node.js is installed
|
||||
ansible.builtin.command: node --version
|
||||
register: node_version_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
|
||||
- name: Remove NodeSource repository to fix GPG errors (always run first)
|
||||
ansible.builtin.shell: |
|
||||
# Remove NodeSource repository file to prevent GPG errors
|
||||
rm -f /etc/apt/sources.list.d/nodesource.list
|
||||
# Remove NodeSource key file
|
||||
rm -f /etc/apt/keyrings/nodesource.gpg
|
||||
# Clean apt cache to remove GPG errors
|
||||
apt-get update 2>&1 | grep -v "NO_PUBKEY\|nodesource\|W:" || true
|
||||
become: true
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
|
||||
- name: Skip NodeSource setup if Node.js is already installed
|
||||
ansible.builtin.set_fact:
|
||||
skip_nodesource: "{{ node_version_check.rc == 0 }}"
|
||||
|
||||
- name: Check if NodeSource repository exists and is correct
|
||||
ansible.builtin.shell: |
|
||||
if [ -f /etc/apt/sources.list.d/nodesource.list ]; then
|
||||
@ -30,7 +46,10 @@
|
||||
fi
|
||||
register: nodesource_repo_check
|
||||
failed_when: false
|
||||
when: node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
changed_when: false # noqa command-instead-of-module
|
||||
when:
|
||||
- not skip_nodesource | default(false)
|
||||
- (node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22'))
|
||||
|
||||
- name: Check if NodeSource GPG key exists and is correct
|
||||
ansible.builtin.shell: |
|
||||
@ -45,25 +64,11 @@
|
||||
fi
|
||||
register: nodesource_key_check
|
||||
failed_when: false
|
||||
when: node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
|
||||
- name: Remove incorrect NodeSource repository
|
||||
ansible.builtin.file:
|
||||
path: /etc/apt/sources.list.d/nodesource.list
|
||||
state: absent
|
||||
become: true
|
||||
changed_when: false # noqa command-instead-of-module
|
||||
when:
|
||||
- node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
- nodesource_repo_check.stdout == "wrong_config"
|
||||
- not skip_nodesource | default(false)
|
||||
- (node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22'))
|
||||
|
||||
- name: Remove incorrect NodeSource key
|
||||
ansible.builtin.file:
|
||||
path: /etc/apt/keyrings/nodesource.gpg
|
||||
state: absent
|
||||
become: true
|
||||
when:
|
||||
- node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
- nodesource_key_check.stdout == "wrong_key"
|
||||
|
||||
- name: Create keyrings directory
|
||||
ansible.builtin.file:
|
||||
@ -72,18 +77,32 @@
|
||||
mode: '0755'
|
||||
become: true
|
||||
when:
|
||||
- node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
- not skip_nodesource | default(false)
|
||||
- (node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22'))
|
||||
- nodesource_key_check is defined
|
||||
- nodesource_key_check.stdout is defined
|
||||
- nodesource_key_check.stdout in ["not_exists", "wrong_key"]
|
||||
|
||||
- name: Add NodeSource GPG key only if needed
|
||||
ansible.builtin.get_url:
|
||||
url: https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key
|
||||
dest: /etc/apt/keyrings/nodesource.gpg
|
||||
mode: '0644'
|
||||
force: true
|
||||
- name: Import NodeSource GPG key into apt keyring
|
||||
ansible.builtin.shell: |
|
||||
# Ensure keyrings directory exists
|
||||
mkdir -p /etc/apt/keyrings
|
||||
# Remove any existing broken key
|
||||
rm -f /etc/apt/keyrings/nodesource.gpg
|
||||
# Download and convert key to binary format for signed-by
|
||||
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
|
||||
chmod 644 /etc/apt/keyrings/nodesource.gpg
|
||||
# Verify the key file is valid
|
||||
if ! file /etc/apt/keyrings/nodesource.gpg | grep -q "PGP"; then
|
||||
echo "ERROR: Key file is not valid PGP format"
|
||||
exit 1
|
||||
fi
|
||||
become: true
|
||||
when:
|
||||
- node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
- not skip_nodesource | default(false)
|
||||
- (node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22'))
|
||||
- nodesource_key_check is defined
|
||||
- nodesource_key_check.stdout is defined
|
||||
- nodesource_key_check.stdout in ["not_exists", "wrong_key"]
|
||||
|
||||
- name: Add NodeSource repository only if needed
|
||||
@ -93,7 +112,22 @@
|
||||
update_cache: false
|
||||
become: true
|
||||
when:
|
||||
- node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
- not skip_nodesource | default(false)
|
||||
- (node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22'))
|
||||
- nodesource_repo_check is defined
|
||||
- nodesource_repo_check.stdout is defined
|
||||
- nodesource_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
- name: Update apt cache after adding NodeSource repository
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
become: true
|
||||
ignore_errors: true
|
||||
when:
|
||||
- not skip_nodesource | default(false)
|
||||
- (node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22'))
|
||||
- nodesource_repo_check is defined
|
||||
- nodesource_repo_check.stdout is defined
|
||||
- nodesource_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
- name: Install Node.js 22 from NodeSource
|
||||
@ -101,7 +135,9 @@
|
||||
name: nodejs
|
||||
state: present
|
||||
become: true
|
||||
when: node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22')
|
||||
when:
|
||||
- not skip_nodesource | default(false)
|
||||
- (node_version_check.rc != 0 or not node_version_check.stdout.startswith('v22'))
|
||||
|
||||
- name: Verify Node.js installation
|
||||
ansible.builtin.command: node --version
|
||||
|
||||
@ -1,4 +1,14 @@
|
||||
---
|
||||
- name: Remove NodeSource repository to prevent GPG errors
|
||||
ansible.builtin.shell: |
|
||||
# Remove NodeSource repository file to prevent GPG errors during apt cache update
|
||||
rm -f /etc/apt/sources.list.d/nodesource.list
|
||||
# Remove NodeSource key file
|
||||
rm -f /etc/apt/keyrings/nodesource.gpg
|
||||
become: true
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
|
||||
- name: Debug distribution information
|
||||
ansible.builtin.debug:
|
||||
msg:
|
||||
|
||||
@ -12,6 +12,7 @@
|
||||
fi
|
||||
register: docker_key_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
|
||||
- name: Remove incorrect Docker GPG key
|
||||
ansible.builtin.file:
|
||||
@ -43,4 +44,3 @@
|
||||
path: /tmp/docker.gpg
|
||||
state: absent
|
||||
when: docker_key_check.stdout in ["not_exists", "wrong_key"]
|
||||
|
||||
@ -12,6 +12,7 @@
|
||||
fi
|
||||
register: docker_repo_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
|
||||
- name: Remove incorrect Docker repository
|
||||
ansible.builtin.file:
|
||||
@ -26,4 +27,3 @@
|
||||
state: present
|
||||
update_cache: true
|
||||
when: docker_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
@ -20,6 +20,7 @@
|
||||
fi
|
||||
register: docker_repo_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
|
||||
- name: Remove incorrect Docker repository
|
||||
ansible.builtin.file:
|
||||
@ -28,10 +29,38 @@
|
||||
become: true
|
||||
when: docker_repo_check.stdout == "wrong_config"
|
||||
|
||||
- name: Remove NodeSource repository completely before adding Docker repo
|
||||
ansible.builtin.shell: |
|
||||
# Remove NodeSource repository file
|
||||
rm -f /etc/apt/sources.list.d/nodesource.list
|
||||
# Remove NodeSource key file
|
||||
rm -f /etc/apt/keyrings/nodesource.gpg
|
||||
# Remove from sources.list if present
|
||||
sed -i '/nodesource/d' /etc/apt/sources.list 2>/dev/null || true
|
||||
# Remove any cached InRelease files
|
||||
rm -f /var/lib/apt/lists/*nodesource* 2>/dev/null || true
|
||||
rm -f /var/lib/apt/lists/partial/*nodesource* 2>/dev/null || true
|
||||
become: true
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
|
||||
- name: Add Docker repository for Linux Mint (using Ubuntu base) only if needed
|
||||
ansible.builtin.apt_repository:
|
||||
repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ docker_ubuntu_codename }} stable"
|
||||
state: present
|
||||
update_cache: true
|
||||
update_cache: false
|
||||
when: docker_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
- name: Update apt cache after adding Docker repository (ignore NodeSource errors)
|
||||
ansible.builtin.shell: |
|
||||
apt-get update 2>&1 | grep -v "nodesource\|NO_PUBKEY.*2F59B5F99B1BE0B4" || true
|
||||
# Verify update succeeded for non-nodesource repos
|
||||
if apt-get update 2>&1 | grep -q "E:"; then
|
||||
# If there are real errors (not just nodesource), fail
|
||||
if ! apt-get update 2>&1 | grep -q "nodesource"; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
become: true
|
||||
ignore_errors: true
|
||||
when: docker_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
@ -12,6 +12,7 @@
|
||||
fi
|
||||
register: docker_repo_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
|
||||
- name: Remove incorrect Docker repository
|
||||
ansible.builtin.file:
|
||||
@ -26,4 +27,3 @@
|
||||
state: present
|
||||
update_cache: true
|
||||
when: docker_repo_check.stdout in ["not_exists", "wrong_config"]
|
||||
|
||||
@ -6,10 +6,14 @@ findtime = 600
|
||||
# Allow 3 failures before banning
|
||||
maxretry = 3
|
||||
|
||||
# Email notifications (uncomment and configure if needed)
|
||||
destemail = idobkin@gmail.com
|
||||
sender = idobkin@gmail.com
|
||||
action = %(action_mwl)s
|
||||
# Email notifications (configured via fail2ban_destemail variable)
|
||||
{% if fail2ban_destemail | default('') | length > 0 %}
|
||||
destemail = {{ fail2ban_destemail }}
|
||||
sender = {{ fail2ban_sender | default(fail2ban_destemail) }}
|
||||
action = {{ fail2ban_action | default('%(action_mwl)s') }}
|
||||
{% else %}
|
||||
# Email notifications disabled (set fail2ban_destemail in group_vars/all/main.yml to enable)
|
||||
{% endif %}
|
||||
|
||||
[sshd]
|
||||
enabled = true
|
||||
|
||||
27
roles/pote/README.md
Normal file
27
roles/pote/README.md
Normal file
@ -0,0 +1,27 @@
|
||||
# `pote`
|
||||
|
||||
Deploys the **POTE** project as a Python/venv application (no HTTP services required) and schedules cron jobs.
|
||||
|
||||
## What it does
|
||||
|
||||
- Installs required system packages (git, python3.11/venv, build deps, postgresql server/client)
|
||||
- Ensures a dedicated OS user exists (default: `poteapp`)
|
||||
- Creates PostgreSQL database and user
|
||||
- Clones/updates the repo from an SSH remote using a vault-backed private key
|
||||
- Creates a Python virtualenv and installs from `pyproject.toml` (editable mode)
|
||||
- Renders an environment file (default: `{{ pote_app_dir }}/.env`)
|
||||
- Runs Alembic database migrations
|
||||
- Installs cron jobs (daily/weekly/health-check)
|
||||
|
||||
## Key variables
|
||||
|
||||
See `defaults/main.yml`. Common inputs:
|
||||
|
||||
- `pote_git_repo`, `pote_git_branch`
|
||||
- `pote_git_ssh_key` (set `vault_pote_git_ssh_key` in your vault)
|
||||
- `pote_user`, `pote_app_dir`, `pote_venv_dir`
|
||||
- `pote_db_*`, `pote_smtp_*`
|
||||
- `pote_enable_cron`, `pote_*_time`, `pote_*_job`
|
||||
|
||||
|
||||
|
||||
116
roles/pote/defaults/main.yml
Normal file
116
roles/pote/defaults/main.yml
Normal file
@ -0,0 +1,116 @@
|
||||
---
|
||||
# Role: pote
|
||||
# Purpose: Deploy POTE (Python/venv + cron) from a Git repo via SSH.
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Git / source
|
||||
# -----------------------------------------------------------------------------
|
||||
pote_git_repo: ""
|
||||
pote_git_branch: "main"
|
||||
|
||||
# SSH private key used to clone/pull (vault-backed). Keep this secret.
|
||||
# Prefer setting `vault_pote_git_ssh_key` in your vault; `vault_git_ssh_key` is supported for compatibility.
|
||||
pote_git_ssh_key: "{{ vault_pote_git_ssh_key | default(vault_git_ssh_key | default('')) }}"
|
||||
|
||||
# Host/IP for known_hosts (so first clone is non-interactive).
|
||||
pote_git_host: "10.0.30.169"
|
||||
pote_git_port: 22
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# User / paths
|
||||
# -----------------------------------------------------------------------------
|
||||
pote_user: "poteapp"
|
||||
pote_group: "{{ pote_user }}"
|
||||
pote_app_dir: "/home/{{ pote_user }}/pote"
|
||||
pote_venv_dir: "{{ pote_app_dir }}/venv"
|
||||
pote_python_bin: "python3.11"
|
||||
|
||||
# Environment file
|
||||
pote_env_file: "{{ pote_app_dir }}/.env"
|
||||
pote_env_file_mode: "0600"
|
||||
|
||||
# Logs
|
||||
pote_logs_dir: "/home/{{ pote_user }}/logs"
|
||||
pote_log_level: "INFO"
|
||||
pote_log_file: "{{ pote_logs_dir }}/pote.log"
|
||||
|
||||
# Monitoring / alerting (optional)
|
||||
pote_market_tickers: ""
|
||||
pote_alert_min_severity: ""
|
||||
|
||||
# Optional API keys
|
||||
pote_quiverquant_api_key: ""
|
||||
pote_fmp_api_key: ""
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# System deps
|
||||
# -----------------------------------------------------------------------------
|
||||
pote_system_packages:
|
||||
- git
|
||||
- ca-certificates
|
||||
- python3.11
|
||||
- python3.11-venv
|
||||
- python3.11-dev
|
||||
- python3-pip
|
||||
- build-essential
|
||||
- postgresql
|
||||
- postgresql-contrib
|
||||
- postgresql-client
|
||||
- libpq-dev
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Database
|
||||
# -----------------------------------------------------------------------------
|
||||
pote_db_host: "localhost"
|
||||
pote_db_port: 5432
|
||||
pote_db_name: "potedb"
|
||||
pote_db_user: "poteuser"
|
||||
# Prefer env-specific vault vars; fall back to a generic one if present.
|
||||
pote_db_password: >-
|
||||
{{
|
||||
vault_pote_db_password
|
||||
| default(
|
||||
(vault_pote_db_password_dev | default(vault_db_password_dev | default(''), true)) if pote_env == 'dev'
|
||||
else (vault_pote_db_password_qa | default(vault_db_password_qa | default(''), true)) if pote_env == 'qa'
|
||||
else (vault_pote_db_password_prod | default(vault_db_password_prod | default(''), true)) if pote_env == 'prod'
|
||||
else '',
|
||||
true
|
||||
)
|
||||
}}
|
||||
|
||||
# Convenience computed URL (commonly used by Python apps)
|
||||
pote_database_url: "postgresql://{{ pote_db_user }}:{{ pote_db_password }}@{{ pote_db_host }}:{{ pote_db_port }}/{{ pote_db_name }}"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# SMTP / email
|
||||
# -----------------------------------------------------------------------------
|
||||
pote_smtp_host: "mail.levkin.ca"
|
||||
pote_smtp_port: 587
|
||||
pote_smtp_user: ""
|
||||
pote_smtp_password: "{{ vault_pote_smtp_password | default(vault_smtp_password | default('')) }}"
|
||||
pote_from_email: ""
|
||||
pote_report_recipients: ""
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Automation / cron
|
||||
# -----------------------------------------------------------------------------
|
||||
pote_enable_cron: true
|
||||
|
||||
# "minute hour" (e.g. "0 6")
|
||||
pote_daily_report_time: "0 6"
|
||||
# "minute hour dow" (e.g. "0 8 0" => Sunday 08:00)
|
||||
pote_weekly_report_time: "0 8 0"
|
||||
# "minute hour" for */6 style (e.g. "0 */6")
|
||||
pote_health_check_time: "0 */6"
|
||||
|
||||
pote_daily_report_enabled: true
|
||||
pote_weekly_report_enabled: true
|
||||
pote_health_check_enabled: true
|
||||
|
||||
# Commands (adjust to your repo’s actual scripts)
|
||||
pote_daily_job: "{{ pote_app_dir }}/scripts/automated_daily_run.sh >> {{ pote_logs_dir }}/daily_run.log 2>&1"
|
||||
pote_weekly_job: "{{ pote_app_dir }}/scripts/automated_weekly_run.sh >> {{ pote_logs_dir }}/weekly_run.log 2>&1"
|
||||
pote_health_check_job: "{{ pote_venv_dir }}/bin/python {{ pote_app_dir }}/scripts/health_check.py >> {{ pote_logs_dir }}/health_check.log 2>&1"
|
||||
|
||||
# Environment name for templating/logging (dev|qa|prod)
|
||||
pote_env: "{{ app_env | default('prod') }}"
|
||||
227
roles/pote/tasks/main.yml
Normal file
227
roles/pote/tasks/main.yml
Normal file
@ -0,0 +1,227 @@
|
||||
---
|
||||
# Role: pote
|
||||
# Purpose: Deploy POTE (python/venv) and schedule cron jobs.
|
||||
|
||||
- name: Ensure POTE system dependencies are installed
|
||||
ansible.builtin.apt:
|
||||
name: "{{ pote_system_packages }}"
|
||||
state: present
|
||||
update_cache: true
|
||||
cache_valid_time: 3600
|
||||
|
||||
- name: Ensure POTE group exists
|
||||
ansible.builtin.group:
|
||||
name: "{{ pote_group }}"
|
||||
state: present
|
||||
|
||||
- name: Ensure POTE user exists
|
||||
ansible.builtin.user:
|
||||
name: "{{ pote_user }}"
|
||||
group: "{{ pote_group }}"
|
||||
shell: /bin/bash
|
||||
create_home: true
|
||||
state: present
|
||||
|
||||
- name: Ensure POTE app directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ pote_app_dir }}"
|
||||
state: directory
|
||||
owner: "{{ pote_user }}"
|
||||
group: "{{ pote_group }}"
|
||||
mode: "0755"
|
||||
|
||||
- name: Ensure SSH directory exists for POTE user
|
||||
ansible.builtin.file:
|
||||
path: "/home/{{ pote_user }}/.ssh"
|
||||
state: directory
|
||||
owner: "{{ pote_user }}"
|
||||
group: "{{ pote_group }}"
|
||||
mode: "0700"
|
||||
|
||||
- name: Install Git SSH key for POTE (vault-backed)
|
||||
ansible.builtin.copy:
|
||||
dest: "/home/{{ pote_user }}/.ssh/id_ed25519"
|
||||
content: "{{ pote_git_ssh_key }}"
|
||||
owner: "{{ pote_user }}"
|
||||
group: "{{ pote_group }}"
|
||||
mode: "0600"
|
||||
no_log: true
|
||||
when: (pote_git_ssh_key | default('')) | length > 0
|
||||
|
||||
- name: Fetch Git host key (ssh-keyscan)
|
||||
ansible.builtin.command: "ssh-keyscan -p {{ pote_git_port }} -H {{ pote_git_host }}"
|
||||
register: pote_ssh_keyscan
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: (pote_git_host | default('')) | length > 0
|
||||
|
||||
- name: Ensure Git host is in known_hosts for POTE user
|
||||
ansible.builtin.known_hosts:
|
||||
path: "/home/{{ pote_user }}/.ssh/known_hosts"
|
||||
name: "{{ pote_git_host }}"
|
||||
key: "{{ pote_ssh_keyscan.stdout }}"
|
||||
state: present
|
||||
when:
|
||||
- (pote_git_host | default('')) | length > 0
|
||||
- (pote_ssh_keyscan.stdout | default('')) | length > 0
|
||||
|
||||
- name: Clone/update POTE repository
|
||||
block:
|
||||
- name: Clone/update POTE repository (git over SSH)
|
||||
ansible.builtin.git:
|
||||
repo: "{{ pote_git_repo }}"
|
||||
dest: "{{ pote_app_dir }}"
|
||||
version: "{{ pote_git_branch }}"
|
||||
key_file: "/home/{{ pote_user }}/.ssh/id_ed25519"
|
||||
accept_hostkey: true
|
||||
update: true
|
||||
become: true
|
||||
become_user: "{{ pote_user }}"
|
||||
register: pote_git_result
|
||||
rescue:
|
||||
- name: Abort with actionable Git SSH guidance
|
||||
ansible.builtin.fail:
|
||||
msg: >-
|
||||
Failed to clone {{ pote_git_repo }} (branch={{ pote_git_branch }}) as user {{ pote_user }}.
|
||||
Common causes:
|
||||
- vault_pote_git_ssh_key is not a valid OpenSSH private key (or is passphrase-protected)
|
||||
- the public key is not added to Gitea as a deploy key / user key with access to ilia/POTE
|
||||
- repo or branch name is wrong
|
||||
Error: {{ pote_git_result.msg | default(pote_git_result.stderr | default('unknown error')) }}
|
||||
|
||||
- name: Ensure PostgreSQL is running
|
||||
ansible.builtin.systemd:
|
||||
name: postgresql
|
||||
state: started
|
||||
enabled: true
|
||||
|
||||
- name: Check if PostgreSQL role exists
|
||||
ansible.builtin.command: "psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{{ pote_db_user }}'\""
|
||||
become: true
|
||||
become_user: postgres
|
||||
register: pote_pg_role_check
|
||||
changed_when: false
|
||||
|
||||
- name: Create PostgreSQL user for POTE
|
||||
ansible.builtin.command: "psql -c \"CREATE USER {{ pote_db_user }} WITH PASSWORD '{{ pote_db_password }}'\""
|
||||
become: true
|
||||
become_user: postgres
|
||||
when: (pote_pg_role_check.stdout | trim) != '1'
|
||||
changed_when: true
|
||||
|
||||
- name: Ensure PostgreSQL user password is set (idempotent)
|
||||
ansible.builtin.command: "psql -c \"ALTER USER {{ pote_db_user }} WITH PASSWORD '{{ pote_db_password }}'\""
|
||||
become: true
|
||||
become_user: postgres
|
||||
when: (pote_db_password | default('')) | length > 0
|
||||
changed_when: false
|
||||
|
||||
- name: Check if PostgreSQL database exists
|
||||
ansible.builtin.command: "psql -tAc \"SELECT 1 FROM pg_database WHERE datname='{{ pote_db_name }}'\""
|
||||
become: true
|
||||
become_user: postgres
|
||||
register: pote_pg_db_check
|
||||
changed_when: false
|
||||
|
||||
- name: Create PostgreSQL database for POTE
|
||||
ansible.builtin.command: "psql -c \"CREATE DATABASE {{ pote_db_name }} OWNER {{ pote_db_user }}\""
|
||||
become: true
|
||||
become_user: postgres
|
||||
when: (pote_pg_db_check.stdout | trim) != '1'
|
||||
changed_when: true
|
||||
|
||||
- name: Ensure Python virtual environment exists
|
||||
ansible.builtin.command: "{{ pote_python_bin }} -m venv {{ pote_venv_dir }}"
|
||||
args:
|
||||
creates: "{{ pote_venv_dir }}/bin/activate"
|
||||
become: true
|
||||
become_user: "{{ pote_user }}"
|
||||
|
||||
- name: Upgrade pip in venv
|
||||
ansible.builtin.pip:
|
||||
name: pip
|
||||
state: present
|
||||
virtualenv: "{{ pote_venv_dir }}"
|
||||
become: true
|
||||
become_user: "{{ pote_user }}"
|
||||
|
||||
- name: Deploy POTE environment file
|
||||
ansible.builtin.template:
|
||||
src: env.j2
|
||||
dest: "{{ pote_env_file }}"
|
||||
owner: "{{ pote_user }}"
|
||||
group: "{{ pote_group }}"
|
||||
mode: "{{ pote_env_file_mode }}"
|
||||
|
||||
- name: Install POTE in editable mode (pyproject.toml)
|
||||
ansible.builtin.pip:
|
||||
name: "{{ pote_app_dir }}"
|
||||
editable: true
|
||||
virtualenv: "{{ pote_venv_dir }}"
|
||||
become: true
|
||||
become_user: "{{ pote_user }}"
|
||||
|
||||
- name: Run Alembic migrations
|
||||
ansible.builtin.command: "{{ pote_venv_dir }}/bin/alembic upgrade head"
|
||||
args:
|
||||
chdir: "{{ pote_app_dir }}"
|
||||
become: true
|
||||
become_user: "{{ pote_user }}"
|
||||
changed_when: false
|
||||
|
||||
- name: Ensure logs directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ pote_logs_dir }}"
|
||||
state: directory
|
||||
owner: "{{ pote_user }}"
|
||||
group: "{{ pote_group }}"
|
||||
mode: "0755"
|
||||
|
||||
- name: Ensure automation shell scripts are executable
|
||||
ansible.builtin.file:
|
||||
path: "{{ pote_app_dir }}/scripts/{{ item }}"
|
||||
mode: "0755"
|
||||
loop:
|
||||
- automated_daily_run.sh
|
||||
- automated_weekly_run.sh
|
||||
- setup_cron.sh
|
||||
- setup_automation.sh
|
||||
become: true
|
||||
become_user: "{{ pote_user }}"
|
||||
|
||||
- name: Install cron job - daily report
|
||||
ansible.builtin.cron:
|
||||
name: "POTE daily report"
|
||||
minute: "{{ pote_daily_report_time.split()[0] }}"
|
||||
hour: "{{ pote_daily_report_time.split()[1] }}"
|
||||
job: "{{ pote_daily_job }}"
|
||||
user: "{{ pote_user }}"
|
||||
state: present
|
||||
when:
|
||||
- pote_enable_cron | bool
|
||||
- pote_daily_report_enabled | bool
|
||||
|
||||
- name: Install cron job - weekly report
|
||||
ansible.builtin.cron:
|
||||
name: "POTE weekly report"
|
||||
minute: "{{ pote_weekly_report_time.split()[0] }}"
|
||||
hour: "{{ pote_weekly_report_time.split()[1] }}"
|
||||
weekday: "{{ pote_weekly_report_time.split()[2] }}"
|
||||
job: "{{ pote_weekly_job }}"
|
||||
user: "{{ pote_user }}"
|
||||
state: present
|
||||
when:
|
||||
- pote_enable_cron | bool
|
||||
- pote_weekly_report_enabled | bool
|
||||
|
||||
- name: Install cron job - health check
|
||||
ansible.builtin.cron:
|
||||
name: "POTE health check"
|
||||
minute: "{{ pote_health_check_time.split()[0] }}"
|
||||
hour: "{{ pote_health_check_time.split()[1] }}"
|
||||
job: "{{ pote_health_check_job }}"
|
||||
user: "{{ pote_user }}"
|
||||
state: present
|
||||
when:
|
||||
- pote_enable_cron | bool
|
||||
- pote_health_check_enabled | bool
|
||||
27
roles/pote/templates/env.j2
Normal file
27
roles/pote/templates/env.j2
Normal file
@ -0,0 +1,27 @@
|
||||
### Ansible-managed POTE environment
|
||||
POTE_ENV="{{ pote_env }}"
|
||||
|
||||
# Database
|
||||
DATABASE_URL="{{ pote_database_url }}"
|
||||
|
||||
# Email
|
||||
SMTP_HOST="{{ pote_smtp_host }}"
|
||||
SMTP_PORT="{{ pote_smtp_port }}"
|
||||
SMTP_USER="{{ pote_smtp_user }}"
|
||||
SMTP_PASSWORD="{{ pote_smtp_password }}"
|
||||
FROM_EMAIL="{{ pote_from_email }}"
|
||||
REPORT_RECIPIENTS="{{ pote_report_recipients }}"
|
||||
|
||||
# Monitoring / alerting (optional)
|
||||
MARKET_MONITOR_TICKERS="{{ pote_market_tickers | default('') }}"
|
||||
ALERT_MIN_SEVERITY="{{ pote_alert_min_severity | default('') }}"
|
||||
|
||||
# Logging
|
||||
LOG_LEVEL="{{ pote_log_level }}"
|
||||
LOG_FILE="{{ pote_log_file }}"
|
||||
|
||||
# Optional API keys
|
||||
QUIVERQUANT_API_KEY="{{ pote_quiverquant_api_key | default('') }}"
|
||||
FMP_API_KEY="{{ pote_fmp_api_key | default('') }}"
|
||||
|
||||
|
||||
@ -1,64 +1,82 @@
|
||||
# Role: proxmox_vm
|
||||
# Role: `proxmox_vm`
|
||||
|
||||
## Description
|
||||
Creates and configures virtual machines on Proxmox VE hypervisor with cloud-init support and automated provisioning.
|
||||
Provision Proxmox guests via API. This role supports **both**:
|
||||
|
||||
- **LXC containers** (`proxmox_guest_type: lxc`) via `community.proxmox.proxmox`
|
||||
- **KVM VMs** (`proxmox_guest_type: kvm`) via `community.general.proxmox_kvm`
|
||||
|
||||
The entry point is `roles/proxmox_vm/tasks/main.yml`, which dispatches to `tasks/lxc.yml` or `tasks/kvm.yml`.
|
||||
|
||||
## Requirements
|
||||
- Ansible 2.9+
|
||||
- Proxmox VE server
|
||||
- `community.general` collection
|
||||
- Valid Proxmox credentials in vault
|
||||
|
||||
## Features
|
||||
- Automated VM creation with cloud-init
|
||||
- Configurable CPU, memory, and disk resources
|
||||
- Network configuration with DHCP or static IP
|
||||
- SSH key injection for passwordless access
|
||||
- Ubuntu Server template support
|
||||
- Ansible (project tested with modern Ansible; older 2.9-era setups may need adjustments)
|
||||
- Proxmox VE API access
|
||||
- Collections:
|
||||
- `community.proxmox`
|
||||
- `community.general` (for `proxmox_kvm`)
|
||||
- Python lib on the control machine:
|
||||
- `proxmoxer` (installed by `make bootstrap` / `requirements.txt`)
|
||||
|
||||
## Variables
|
||||
## Authentication (vault-backed)
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `vm_memory` | `8192` | RAM allocation in MB |
|
||||
| `vm_cores` | `2` | Number of CPU cores |
|
||||
| `vm_disk_size` | `20G` | Disk size |
|
||||
| `vm_iso` | `ubuntu-24.04-live-server-amd64.iso` | Installation ISO |
|
||||
| `vm_ciuser` | `master` | Default cloud-init user |
|
||||
| `vm_storage` | `local-lvm` | Proxmox storage backend |
|
||||
Store secrets in `inventories/production/group_vars/all/vault.yml`:
|
||||
|
||||
## Vault Variables (Required)
|
||||
- `vault_proxmox_host`
|
||||
- `vault_proxmox_user`
|
||||
- `vault_proxmox_password` (or token auth)
|
||||
- `vault_proxmox_token_id` (optional)
|
||||
- `vault_proxmox_token` (optional)
|
||||
- `vault_ssh_public_key` (used for bootstrap access where applicable)
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `vault_proxmox_host` | Proxmox server IP/hostname |
|
||||
| `vault_proxmox_user` | Proxmox username (e.g., root@pam) |
|
||||
| `vault_proxmox_password` | Proxmox password |
|
||||
| `vault_vm_cipassword` | VM default user password |
|
||||
| `vault_ssh_public_key` | SSH public key for VM access |
|
||||
## Key variables
|
||||
|
||||
## Dependencies
|
||||
- Proxmox VE server with API access
|
||||
- ISO images uploaded to Proxmox storage
|
||||
Common:
|
||||
|
||||
## Example Playbook
|
||||
- `proxmox_guest_type`: `lxc` or `kvm`
|
||||
- `proxmox_host`, `proxmox_user`, `proxmox_node`
|
||||
- `proxmox_api_port` (default `8006`)
|
||||
- `proxmox_validate_certs` (default `false`)
|
||||
|
||||
LXC (`tasks/lxc.yml`):
|
||||
|
||||
- `lxc_vmid`, `lxc_hostname`
|
||||
- `lxc_ostemplate` (e.g. `local:vztmpl/debian-12-standard_*.tar.zst`)
|
||||
- `lxc_storage` (default `local-lvm`)
|
||||
- `lxc_network_bridge` (default `vmbr0`)
|
||||
- `lxc_ip` (CIDR), `lxc_gateway`
|
||||
- `lxc_cores`, `lxc_memory_mb`, `lxc_swap_mb`, `lxc_rootfs_size_gb`
|
||||
|
||||
KVM (`tasks/kvm.yml`):
|
||||
|
||||
- `vm_id`, `vm_name`
|
||||
- `vm_cores`, `vm_memory`, `vm_disk_size`
|
||||
- `vm_storage`, `vm_network_bridge`
|
||||
- cloud-init parameters used by the existing KVM provisioning flow
|
||||
|
||||
## Safety guardrails
|
||||
|
||||
LXC provisioning includes a VMID collision guardrail:
|
||||
|
||||
- If the target VMID already exists but the guest name does not match the expected name, provisioning fails.
|
||||
- Override only if you really mean it: `-e allow_vmid_collision=true`
|
||||
|
||||
## Example usage
|
||||
|
||||
Provisioning is typically orchestrated by `playbooks/app/provision_vms.yml`, but you can call the role directly:
|
||||
|
||||
```yaml
|
||||
- hosts: localhost
|
||||
roles:
|
||||
- role: proxmox_vm
|
||||
vm_name: "test-vm"
|
||||
vm_id: 999
|
||||
vm_memory: 4096
|
||||
```
|
||||
|
||||
## Tags
|
||||
- `proxmox`: All Proxmox operations
|
||||
- `vm`: VM creation tasks
|
||||
- `infrastructure`: Infrastructure provisioning
|
||||
|
||||
## Notes
|
||||
- Requires Proxmox API credentials in vault
|
||||
- VM IDs must be unique on Proxmox cluster
|
||||
- Cloud-init requires compatible ISO images
|
||||
- VMs are created but not started by default
|
||||
- name: Provision one LXC
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: false
|
||||
tasks:
|
||||
- name: Create/update container
|
||||
ansible.builtin.include_role:
|
||||
name: proxmox_vm
|
||||
vars:
|
||||
proxmox_guest_type: lxc
|
||||
lxc_vmid: 9301
|
||||
lxc_hostname: projectA-dev
|
||||
lxc_ip: "10.0.10.101/24"
|
||||
lxc_gateway: "10.0.10.1"
|
||||
```
|
||||
@ -25,3 +25,31 @@ vm_nameservers: "8.8.8.8 8.8.4.4"
|
||||
vm_start_after_create: true
|
||||
vm_enable_agent: true
|
||||
vm_boot_order: "order=scsi0"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Proxmox LXC defaults (used when proxmox_guest_type == 'lxc')
|
||||
# -----------------------------------------------------------------------------
|
||||
lxc_vmid: 300
|
||||
lxc_hostname: "app-container"
|
||||
lxc_ostemplate: "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
|
||||
lxc_storage: "local-lvm"
|
||||
lxc_network_bridge: "vmbr0"
|
||||
lxc_ip: "" # e.g. "10.0.10.101/24"
|
||||
lxc_gateway: "" # e.g. "10.0.10.1"
|
||||
lxc_nameserver: "1.1.1.1 8.8.8.8"
|
||||
|
||||
lxc_unprivileged: true
|
||||
# Use list form because community.proxmox.proxmox expects list for `features`
|
||||
lxc_features_list:
|
||||
- "keyctl=1"
|
||||
- "nesting=1"
|
||||
|
||||
lxc_cores: 2
|
||||
lxc_memory_mb: 2048
|
||||
lxc_swap_mb: 512
|
||||
lxc_rootfs_size_gb: 16
|
||||
|
||||
# Add to /root/.ssh/authorized_keys (bootstrap). Override with appuser_ssh_public_key.
|
||||
lxc_pubkey: ""
|
||||
|
||||
lxc_start_after_create: true
|
||||
|
||||
82
roles/proxmox_vm/tasks/kvm.yml
Normal file
82
roles/proxmox_vm/tasks/kvm.yml
Normal file
@ -0,0 +1,82 @@
|
||||
---
|
||||
# Proxmox QEMU VM provisioning via API (cloud-init).
|
||||
# This task file preserves the repo's existing VM behavior.
|
||||
|
||||
# Break down the Proxmox VM creation to avoid "file name too long" error
|
||||
- name: Set VM configuration facts
|
||||
ansible.builtin.set_fact:
|
||||
vm_scsi_config:
|
||||
scsi0: "{{ vm_storage }}:{{ vm_disk_size }},format=raw"
|
||||
vm_net_config:
|
||||
net0: "virtio,bridge={{ vm_network_bridge }},firewall=1"
|
||||
vm_ide_config:
|
||||
ide2: "{{ vm_iso_storage }}:cloudinit,format=qcow2"
|
||||
vm_ipconfig:
|
||||
ipconfig0: "{{ vm_ip_config }}"
|
||||
|
||||
- name: Create VM on Proxmox
|
||||
community.general.proxmox_kvm:
|
||||
# Connection
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
|
||||
|
||||
# VM identification
|
||||
vmid: "{{ vm_id }}"
|
||||
name: "{{ vm_name }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
|
||||
# Hardware specs
|
||||
memory: "{{ vm_memory }}"
|
||||
cores: "{{ vm_cores }}"
|
||||
sockets: "{{ vm_sockets }}"
|
||||
cpu: "host"
|
||||
|
||||
# Storage and network
|
||||
scsi: "{{ vm_scsi_config }}"
|
||||
net: "{{ vm_net_config }}"
|
||||
ide: "{{ vm_ide_config }}"
|
||||
|
||||
# Boot and OS
|
||||
boot: "{{ vm_boot_order }}"
|
||||
ostype: "{{ vm_os_type }}"
|
||||
|
||||
# Cloud-init
|
||||
ciuser: "{{ vm_ciuser }}"
|
||||
cipassword: "{{ vault_vm_cipassword | default(omit) }}"
|
||||
sshkeys: "{{ vm_ssh_keys | join('\n') if vm_ssh_keys else omit }}"
|
||||
ipconfig: "{{ vm_ipconfig }}"
|
||||
nameserver: "{{ vm_nameservers }}"
|
||||
|
||||
# VM options
|
||||
agent: "{{ vm_enable_agent | bool }}"
|
||||
autostart: false
|
||||
balloon: 0
|
||||
state: present
|
||||
register: vm_creation_result
|
||||
|
||||
- name: Start VM if requested
|
||||
community.general.proxmox_kvm:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
|
||||
vmid: "{{ vm_id }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
state: started
|
||||
when: vm_start_after_create | bool
|
||||
|
||||
- name: Display VM creation results
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
VM Created: {{ vm_name }} (ID: {{ vm_id }})
|
||||
Memory: {{ vm_memory }}MB
|
||||
Cores: {{ vm_cores }}
|
||||
Storage: {{ vm_storage }}:{{ vm_disk_size }}
|
||||
Network: {{ vm_network_bridge }}
|
||||
Status: {{ vm_creation_result.msg | default('Created') }}
|
||||
|
||||
|
||||
82
roles/proxmox_vm/tasks/lxc.yml
Normal file
82
roles/proxmox_vm/tasks/lxc.yml
Normal file
@ -0,0 +1,82 @@
|
||||
---
|
||||
# Proxmox LXC container provisioning via API.
|
||||
#
|
||||
# This uses `community.proxmox.proxmox` because it is widely available and
|
||||
# supports idempotent updates via `update: true`.
|
||||
|
||||
- name: Build LXC netif configuration
|
||||
ansible.builtin.set_fact:
|
||||
lxc_netif_config:
|
||||
# IMPORTANT: Proxmox requires net0 to be a single comma-delimited string.
|
||||
# Avoid folded YAML blocks here (they can introduce newlines/spaces).
|
||||
net0: >-
|
||||
{{
|
||||
(
|
||||
['name=eth0', 'bridge=' ~ lxc_network_bridge, 'firewall=1']
|
||||
+ (['ip=' ~ lxc_ip] if (lxc_ip is defined and (lxc_ip | string | length) > 0) else [])
|
||||
+ (['gw=' ~ lxc_gateway] if (lxc_gateway is defined and (lxc_gateway | string | length) > 0) else [])
|
||||
) | join(',')
|
||||
}}
|
||||
|
||||
- name: Ensure LXC container is present (create or update)
|
||||
community.proxmox.proxmox:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_port: "{{ proxmox_api_port | default(8006) }}"
|
||||
validate_certs: "{{ proxmox_validate_certs | default(false) }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password | default(omit) }}"
|
||||
# Only pass token params when they are set (avoid empty-string triggering required-together errors)
|
||||
api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
|
||||
|
||||
node: "{{ proxmox_node }}"
|
||||
vmid: "{{ lxc_vmid | default(omit) }}"
|
||||
hostname: "{{ lxc_hostname }}"
|
||||
|
||||
ostemplate: "{{ lxc_ostemplate }}"
|
||||
unprivileged: "{{ lxc_unprivileged | bool }}"
|
||||
features: "{{ lxc_features_list | default(omit) }}"
|
||||
|
||||
cores: "{{ lxc_cores }}"
|
||||
memory: "{{ lxc_memory_mb }}"
|
||||
swap: "{{ lxc_swap_mb }}"
|
||||
|
||||
# rootfs sizing (GiB). disk_volume is less version-sensitive than string `disk`.
|
||||
disk_volume:
|
||||
storage: "{{ lxc_storage }}"
|
||||
size: "{{ lxc_rootfs_size_gb }}"
|
||||
|
||||
netif: "{{ lxc_netif_config }}"
|
||||
nameserver: "{{ lxc_nameserver | default(omit) }}"
|
||||
|
||||
# Bootstrap root SSH access (used by Ansible until appuser exists).
|
||||
pubkey: "{{ lxc_pubkey | default(omit) }}"
|
||||
password: "{{ vault_lxc_root_password | default(omit) }}"
|
||||
|
||||
update: true
|
||||
state: present
|
||||
register: lxc_present
|
||||
|
||||
- name: Ensure LXC container is started
|
||||
community.proxmox.proxmox:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_port: "{{ proxmox_api_port | default(8006) }}"
|
||||
validate_certs: "{{ proxmox_validate_certs | default(false) }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password | default(omit) }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
vmid: "{{ lxc_vmid | default(omit) }}"
|
||||
state: started
|
||||
when: lxc_start_after_create | bool
|
||||
|
||||
- name: Display LXC provisioning results
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
LXC Present: {{ lxc_hostname }} (VMID: {{ lxc_vmid }})
|
||||
Cores: {{ lxc_cores }}
|
||||
Memory: {{ lxc_memory_mb }}MB (swap {{ lxc_swap_mb }}MB)
|
||||
RootFS: {{ lxc_storage }}:{{ lxc_rootfs_size_gb }}
|
||||
Net: {{ lxc_network_bridge }} / {{ lxc_ip | default('dhcp/unspecified') }}
|
||||
Changed: {{ lxc_present.changed | default(false) }}
|
||||
@ -1,77 +1,13 @@
|
||||
---
|
||||
# Break down the Proxmox VM creation to avoid "file name too long" error
|
||||
- name: Set VM configuration facts
|
||||
ansible.builtin.set_fact:
|
||||
vm_scsi_config:
|
||||
scsi0: "{{ vm_storage }}:{{ vm_disk_size }},format=raw"
|
||||
vm_net_config:
|
||||
net0: "virtio,bridge={{ vm_network_bridge }},firewall=1"
|
||||
vm_ide_config:
|
||||
ide2: "{{ vm_iso_storage }}:cloudinit,format=qcow2"
|
||||
vm_ipconfig:
|
||||
ipconfig0: "{{ vm_ip_config }}"
|
||||
# Proxmox guest provisioning dispatcher.
|
||||
#
|
||||
# - `proxmox_guest_type: lxc` uses `tasks/lxc.yml`
|
||||
# - default uses `tasks/kvm.yml` (existing behavior)
|
||||
|
||||
- name: Create VM on Proxmox
|
||||
community.general.proxmox_kvm:
|
||||
# Connection
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
|
||||
- name: Provision LXC container
|
||||
ansible.builtin.include_tasks: lxc.yml
|
||||
when: (proxmox_guest_type | default('kvm')) == 'lxc'
|
||||
|
||||
# VM identification
|
||||
vmid: "{{ vm_id }}"
|
||||
name: "{{ vm_name }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
|
||||
# Hardware specs
|
||||
memory: "{{ vm_memory }}"
|
||||
cores: "{{ vm_cores }}"
|
||||
sockets: "{{ vm_sockets }}"
|
||||
cpu: "host"
|
||||
|
||||
# Storage and network
|
||||
scsi: "{{ vm_scsi_config }}"
|
||||
net: "{{ vm_net_config }}"
|
||||
ide: "{{ vm_ide_config }}"
|
||||
|
||||
# Boot and OS
|
||||
boot: "{{ vm_boot_order }}"
|
||||
ostype: "{{ vm_os_type }}"
|
||||
|
||||
# Cloud-init
|
||||
ciuser: "{{ vm_ciuser }}"
|
||||
cipassword: "{{ vault_vm_cipassword | default(omit) }}"
|
||||
sshkeys: "{{ vm_ssh_keys | join('\n') if vm_ssh_keys else omit }}"
|
||||
ipconfig: "{{ vm_ipconfig }}"
|
||||
nameserver: "{{ vm_nameservers }}"
|
||||
|
||||
# VM options
|
||||
agent: "{{ vm_enable_agent | bool }}"
|
||||
autostart: false
|
||||
balloon: 0
|
||||
state: present
|
||||
register: vm_creation_result
|
||||
|
||||
- name: Start VM if requested
|
||||
community.general.proxmox_kvm:
|
||||
api_host: "{{ proxmox_host }}"
|
||||
api_user: "{{ proxmox_user }}"
|
||||
api_password: "{{ vault_proxmox_password }}"
|
||||
api_token_id: "{{ proxmox_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
|
||||
vmid: "{{ vm_id }}"
|
||||
node: "{{ proxmox_node }}"
|
||||
state: started
|
||||
when: vm_start_after_create | bool
|
||||
|
||||
- name: Display VM creation results
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
VM Created: {{ vm_name }} (ID: {{ vm_id }})
|
||||
Memory: {{ vm_memory }}MB
|
||||
Cores: {{ vm_cores }}
|
||||
Storage: {{ vm_storage }}:{{ vm_disk_size }}
|
||||
Network: {{ vm_network_bridge }}
|
||||
Status: {{ vm_creation_result.msg | default('Created') }}
|
||||
- name: Provision QEMU VM (cloud-init)
|
||||
ansible.builtin.include_tasks: kvm.yml
|
||||
when: (proxmox_guest_type | default('kvm')) != 'lxc'
|
||||
|
||||
@ -165,10 +165,6 @@ alias dcb="docker-compose build"
|
||||
alias dps="docker ps"
|
||||
alias di="docker images"
|
||||
|
||||
# IDE - suppress root warnings
|
||||
alias code="code --no-sandbox --user-data-dir=/root/.vscode-root"
|
||||
alias cursor="cursor --no-sandbox --disable-gpu-sandbox --appimage-extract-and-run --user-data-dir=/root/.cursor-root"
|
||||
|
||||
# Date and time
|
||||
alias now="date +'%Y-%m-%d %H:%M:%S'"
|
||||
alias today="date +'%Y-%m-%d'"
|
||||
|
||||
@ -101,4 +101,4 @@
|
||||
- " 1. Log out and back in (recommended)"
|
||||
- " 2. Run: exec zsh"
|
||||
- " 3. Or simply run: zsh"
|
||||
- "=========================================="
|
||||
- "=========================================="
|
||||
|
||||
@ -2,8 +2,10 @@
|
||||
# SSH server configuration
|
||||
ssh_port: 22
|
||||
ssh_listen_addresses: ['0.0.0.0']
|
||||
ssh_permit_root_login: 'yes'
|
||||
ssh_password_authentication: 'yes'
|
||||
# Security defaults - hardened by default
|
||||
# Override in group_vars for dev/desktop machines if needed
|
||||
ssh_permit_root_login: 'prohibit-password' # Allow root only with keys, not passwords
|
||||
ssh_password_authentication: 'no' # Disable password auth by default (use keys)
|
||||
ssh_pubkey_authentication: 'yes'
|
||||
ssh_max_auth_tries: 3
|
||||
ssh_client_alive_interval: 300
|
||||
|
||||
@ -33,7 +33,16 @@
|
||||
name: OpenSSH
|
||||
failed_when: false
|
||||
|
||||
- name: Enable UFW with deny default policy
|
||||
- name: Set UFW default policy for incoming (deny)
|
||||
community.general.ufw:
|
||||
direction: incoming
|
||||
policy: deny
|
||||
|
||||
- name: Set UFW default policy for outgoing (allow)
|
||||
community.general.ufw:
|
||||
direction: outgoing
|
||||
policy: allow
|
||||
|
||||
- name: Enable UFW firewall
|
||||
community.general.ufw:
|
||||
state: enabled
|
||||
policy: deny
|
||||
|
||||
@ -18,6 +18,7 @@
|
||||
fi
|
||||
register: tailscale_key_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
when: tailscale_version_check.rc != 0
|
||||
|
||||
- name: Check if Tailscale repository exists and is correct
|
||||
@ -33,6 +34,7 @@
|
||||
fi
|
||||
register: tailscale_repo_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
when: tailscale_version_check.rc != 0
|
||||
|
||||
- name: Remove incorrect Tailscale GPG key
|
||||
|
||||
100
roles/timeshift/README.md
Normal file
100
roles/timeshift/README.md
Normal file
@ -0,0 +1,100 @@
|
||||
# Timeshift Role
|
||||
|
||||
Manages Timeshift system snapshots for backup and rollback capabilities.
|
||||
|
||||
## Purpose
|
||||
|
||||
This role installs and configures Timeshift, a system restore utility for Linux. It can automatically create snapshots before playbook execution to enable easy rollback if something goes wrong.
|
||||
|
||||
## Features
|
||||
|
||||
- Installs Timeshift package
|
||||
- Creates automatic snapshots before playbook runs
|
||||
- Configurable snapshot retention
|
||||
- Easy rollback capability
|
||||
|
||||
## Variables
|
||||
|
||||
### Installation
|
||||
- `timeshift_install` (default: `true`) - Install Timeshift package
|
||||
|
||||
### Snapshot Settings
|
||||
- `timeshift_auto_snapshot` (default: `true`) - Automatically create snapshot before playbook execution
|
||||
- `timeshift_snapshot_description` (default: `"Ansible playbook snapshot"`) - Description for snapshots
|
||||
- `timeshift_snapshot_tags` (default: `["ansible", "pre-playbook"]`) - Tags for snapshots
|
||||
- `timeshift_snapshot_type` (default: `"RSYNC"`) - Snapshot type: RSYNC or BTRFS
|
||||
|
||||
### Retention
|
||||
- `timeshift_keep_daily` (default: `7`) - Keep daily snapshots for N days
|
||||
- `timeshift_keep_weekly` (default: `4`) - Keep weekly snapshots for N weeks
|
||||
- `timeshift_keep_monthly` (default: `6`) - Keep monthly snapshots for N months
|
||||
|
||||
### Location
|
||||
- `timeshift_snapshot_location` (default: `"/timeshift"`) - Where to store snapshots
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Usage
|
||||
|
||||
Add to your playbook:
|
||||
```yaml
|
||||
roles:
|
||||
- { role: timeshift, tags: ['timeshift', 'snapshot'] }
|
||||
```
|
||||
|
||||
### Disable Auto-Snapshot
|
||||
|
||||
```yaml
|
||||
roles:
|
||||
- { role: timeshift, tags: ['timeshift'] }
|
||||
```
|
||||
|
||||
In host_vars or group_vars:
|
||||
```yaml
|
||||
timeshift_auto_snapshot: false
|
||||
```
|
||||
|
||||
### Manual Snapshot
|
||||
|
||||
```bash
|
||||
# On the target host
|
||||
sudo timeshift --create --comments "Manual snapshot before changes"
|
||||
```
|
||||
|
||||
### Rollback
|
||||
|
||||
```bash
|
||||
# List snapshots
|
||||
sudo timeshift --list
|
||||
|
||||
# Restore from snapshot
|
||||
sudo timeshift --restore --snapshot 'YYYY-MM-DD_HH-MM-SS'
|
||||
|
||||
# Or use the Makefile target
|
||||
make timeshift-restore HOST=dev02 SNAPSHOT=2025-12-17_21-30-00
|
||||
```
|
||||
|
||||
## Integration with Playbooks
|
||||
|
||||
The role is designed to be run early in playbooks to create snapshots before making changes:
|
||||
|
||||
```yaml
|
||||
roles:
|
||||
- { role: timeshift, tags: ['timeshift', 'snapshot'] } # Create snapshot first
|
||||
- { role: base }
|
||||
- { role: development }
|
||||
# ... other roles
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
|
||||
- Debian/Ubuntu-based system
|
||||
- Root/sudo access
|
||||
|
||||
## Notes
|
||||
|
||||
- Snapshots require significant disk space
|
||||
- RSYNC snapshots are larger but work on any filesystem
|
||||
- BTRFS snapshots are smaller but require BTRFS filesystem
|
||||
- Snapshots exclude `/home` by default (configurable)
|
||||
|
||||
21
roles/timeshift/defaults/main.yml
Normal file
21
roles/timeshift/defaults/main.yml
Normal file
@ -0,0 +1,21 @@
|
||||
---
|
||||
# Timeshift role defaults
|
||||
|
||||
# Install Timeshift
|
||||
timeshift_install: true
|
||||
|
||||
# Timeshift snapshot settings
|
||||
timeshift_snapshot_type: "RSYNC" # RSYNC or BTRFS
|
||||
timeshift_snapshot_description: "Ansible playbook snapshot"
|
||||
timeshift_snapshot_tags: ["ansible", "pre-playbook"]
|
||||
|
||||
# Auto-create snapshot before playbook runs
|
||||
timeshift_auto_snapshot: true
|
||||
|
||||
# Retention settings
|
||||
timeshift_keep_daily: 7
|
||||
timeshift_keep_weekly: 4
|
||||
timeshift_keep_monthly: 6
|
||||
|
||||
# Snapshot location (default: /timeshift)
|
||||
timeshift_snapshot_location: "/timeshift"
|
||||
52
roles/timeshift/tasks/main.yml
Normal file
52
roles/timeshift/tasks/main.yml
Normal file
@ -0,0 +1,52 @@
|
||||
---
|
||||
- name: Check if Timeshift is installed
|
||||
ansible.builtin.command: timeshift --version
|
||||
register: timeshift_check
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
|
||||
- name: Install Timeshift
|
||||
ansible.builtin.apt:
|
||||
name: timeshift
|
||||
state: present
|
||||
become: true
|
||||
when:
|
||||
- timeshift_install | default(true) | bool
|
||||
- timeshift_check.rc != 0
|
||||
|
||||
- name: Create Timeshift snapshot directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ timeshift_snapshot_location }}"
|
||||
state: directory
|
||||
mode: '0755'
|
||||
become: true
|
||||
when: timeshift_install | default(true) | bool
|
||||
|
||||
- name: Create snapshot before playbook execution
|
||||
ansible.builtin.command: >
|
||||
timeshift --create
|
||||
--comments "{{ timeshift_snapshot_description }}"
|
||||
--tags {{ timeshift_snapshot_tags | join(',') }}
|
||||
--scripted
|
||||
become: true
|
||||
register: timeshift_snapshot_result
|
||||
when:
|
||||
- timeshift_auto_snapshot | default(true) | bool
|
||||
- timeshift_check.rc == 0 or timeshift_install | default(true) | bool
|
||||
changed_when: "'Snapshot created successfully' in timeshift_snapshot_result.stdout or 'Created snapshot' in timeshift_snapshot_result.stdout"
|
||||
failed_when: >
|
||||
timeshift_snapshot_result.rc != 0
|
||||
and "'already exists' not in timeshift_snapshot_result.stderr | default('')"
|
||||
and "'Snapshot created' not in timeshift_snapshot_result.stderr | default('')"
|
||||
ignore_errors: true
|
||||
|
||||
- name: Display snapshot information
|
||||
ansible.builtin.debug:
|
||||
msg:
|
||||
- "Timeshift snapshot operation completed"
|
||||
- "Output: {{ timeshift_snapshot_result.stdout | default('Check with: sudo timeshift --list') }}"
|
||||
- "To list snapshots: sudo timeshift --list"
|
||||
- "To restore: sudo timeshift --restore --snapshot 'SNAPSHOT_NAME'"
|
||||
when:
|
||||
- timeshift_auto_snapshot | default(true) | bool
|
||||
- timeshift_snapshot_result is defined
|
||||
11
site.yml
Normal file
11
site.yml
Normal file
@ -0,0 +1,11 @@
|
||||
---
|
||||
# Wrapper playbook
|
||||
# Purpose: allow running from repo root:
|
||||
# ansible-playbook -i inventories/production site.yml
|
||||
#
|
||||
# This delegates to the main site playbook under playbooks/.
|
||||
|
||||
- name: Main site
|
||||
import_playbook: playbooks/site.yml
|
||||
|
||||
|
||||
@ -141,39 +141,91 @@ class ConnectivityTester:
|
||||
return result
|
||||
|
||||
def _analyze_connectivity(self, result: Dict) -> Tuple[str, str]:
|
||||
"""Analyze connectivity results and provide recommendations."""
|
||||
hostname = result['hostname']
|
||||
primary_ip = result['primary_ip']
|
||||
fallback_ip = result['fallback_ip']
|
||||
|
||||
# Primary IP works perfectly
|
||||
if result['primary_ping'] and result['primary_ssh']:
|
||||
return 'success', f"✓ {hostname} is fully accessible via primary IP {primary_ip}"
|
||||
|
||||
# Primary ping works but SSH fails
|
||||
if result['primary_ping'] and not result['primary_ssh']:
|
||||
error = result['primary_ssh_error']
|
||||
if 'Permission denied' in error:
|
||||
return 'ssh_key', f"⚠ {hostname}: SSH key issue on {primary_ip} - run: make copy-ssh-key HOST={hostname}"
|
||||
elif 'Connection refused' in error:
|
||||
return 'ssh_service', f"⚠ {hostname}: SSH service not running on {primary_ip}"
|
||||
else:
|
||||
return 'ssh_error', f"⚠ {hostname}: SSH error on {primary_ip} - {error}"
|
||||
|
||||
# Primary IP fails, test fallback
|
||||
if not result['primary_ping'] and fallback_ip:
|
||||
if result['fallback_ping'] and result['fallback_ssh']:
|
||||
return 'use_fallback', f"→ {hostname}: Switch to fallback IP {fallback_ip} (primary {primary_ip} failed)"
|
||||
elif result['fallback_ping'] and not result['fallback_ssh']:
|
||||
return 'fallback_ssh', f"⚠ {hostname}: Fallback IP {fallback_ip} reachable but SSH failed"
|
||||
else:
|
||||
return 'both_failed', f"✗ {hostname}: Both primary {primary_ip} and fallback {fallback_ip} failed"
|
||||
|
||||
# No fallback IP and primary failed
|
||||
if not result['primary_ping'] and not fallback_ip:
|
||||
return 'no_fallback', f"✗ {hostname}: Primary IP {primary_ip} failed, no fallback available"
|
||||
|
||||
return 'unknown', f"? {hostname}: Unknown connectivity state"
|
||||
"""Analyze connectivity results and provide recommendations.
|
||||
|
||||
Split into smaller helpers to keep this function's complexity low
|
||||
while preserving the original decision logic.
|
||||
"""
|
||||
for handler in (
|
||||
self._handle_primary_success,
|
||||
self._handle_primary_ping_only,
|
||||
self._handle_fallback_path,
|
||||
self._handle_no_fallback,
|
||||
):
|
||||
outcome = handler(result)
|
||||
if outcome is not None:
|
||||
return outcome
|
||||
|
||||
hostname = result["hostname"]
|
||||
return "unknown", f"? {hostname}: Unknown connectivity state"
|
||||
|
||||
def _handle_primary_success(self, result: Dict) -> Optional[Tuple[str, str]]:
|
||||
"""Handle case where primary IP works perfectly."""
|
||||
if result.get("primary_ping") and result.get("primary_ssh"):
|
||||
hostname = result["hostname"]
|
||||
primary_ip = result["primary_ip"]
|
||||
return "success", f"✓ {hostname} is fully accessible via primary IP {primary_ip}"
|
||||
return None
|
||||
|
||||
def _handle_primary_ping_only(self, result: Dict) -> Optional[Tuple[str, str]]:
|
||||
"""Handle cases where primary ping works but SSH fails."""
|
||||
if result.get("primary_ping") and not result.get("primary_ssh"):
|
||||
hostname = result["hostname"]
|
||||
primary_ip = result["primary_ip"]
|
||||
error = result.get("primary_ssh_error", "")
|
||||
|
||||
if "Permission denied" in error:
|
||||
return (
|
||||
"ssh_key",
|
||||
f"⚠ {hostname}: SSH key issue on {primary_ip} - run: make copy-ssh-key HOST={hostname}",
|
||||
)
|
||||
if "Connection refused" in error:
|
||||
return "ssh_service", f"⚠ {hostname}: SSH service not running on {primary_ip}"
|
||||
return "ssh_error", f"⚠ {hostname}: SSH error on {primary_ip} - {error}"
|
||||
|
||||
return None
|
||||
|
||||
def _handle_fallback_path(self, result: Dict) -> Optional[Tuple[str, str]]:
|
||||
"""Handle cases where primary fails and a fallback IP is defined."""
|
||||
if result.get("primary_ping"):
|
||||
return None
|
||||
|
||||
fallback_ip = result.get("fallback_ip")
|
||||
if not fallback_ip:
|
||||
return None
|
||||
|
||||
hostname = result["hostname"]
|
||||
primary_ip = result["primary_ip"]
|
||||
|
||||
if result.get("fallback_ping") and result.get("fallback_ssh"):
|
||||
return (
|
||||
"use_fallback",
|
||||
f"→ {hostname}: Switch to fallback IP {fallback_ip} (primary {primary_ip} failed)",
|
||||
)
|
||||
|
||||
if result.get("fallback_ping") and not result.get("fallback_ssh"):
|
||||
return (
|
||||
"fallback_ssh",
|
||||
f"⚠ {hostname}: Fallback IP {fallback_ip} reachable but SSH failed",
|
||||
)
|
||||
|
||||
return (
|
||||
"both_failed",
|
||||
f"✗ {hostname}: Both primary {primary_ip} and fallback {fallback_ip} failed",
|
||||
)
|
||||
|
||||
def _handle_no_fallback(self, result: Dict) -> Optional[Tuple[str, str]]:
|
||||
"""Handle cases where primary failed and no fallback IP is available."""
|
||||
if result.get("primary_ping"):
|
||||
return None
|
||||
|
||||
fallback_ip = result.get("fallback_ip")
|
||||
if fallback_ip:
|
||||
return None
|
||||
|
||||
hostname = result["hostname"]
|
||||
primary_ip = result["primary_ip"]
|
||||
return "no_fallback", f"✗ {hostname}: Primary IP {primary_ip} failed, no fallback available"
|
||||
|
||||
def run_tests(self) -> List[Dict]:
|
||||
"""Run connectivity tests for all hosts."""
|
||||
@ -264,8 +316,8 @@ class ConnectivityTester:
|
||||
|
||||
# Auto-fallback suggestion
|
||||
if fallback_needed:
|
||||
print(f"\n🤖 Or run auto-fallback to fix automatically:")
|
||||
print(f" make auto-fallback")
|
||||
print("\n🤖 Or run auto-fallback to fix automatically:")
|
||||
print(" make auto-fallback")
|
||||
|
||||
def export_json(self, results: List[Dict], output_file: str):
|
||||
"""Export results to JSON file."""
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user