Compare commits

...

1 Commits

Author SHA1 Message Date
ilia
c7a300b922 Add POTE app project support and improve IP conflict detection
Some checks failed
CI / lint-and-test (pull_request) Successful in 1m21s
CI / ansible-validation (pull_request) Successful in 9m3s
CI / secret-scanning (pull_request) Successful in 3m19s
CI / dependency-scan (pull_request) Successful in 7m13s
CI / sast-scan (pull_request) Successful in 6m38s
CI / license-check (pull_request) Successful in 1m16s
CI / vault-check (pull_request) Failing after 6m40s
CI / playbook-test (pull_request) Successful in 9m28s
CI / container-scan (pull_request) Successful in 7m59s
CI / sonar-analysis (pull_request) Failing after 1m11s
CI / workflow-summary (pull_request) Successful in 1m11s
- Add roles/pote: Python/venv deployment role with PostgreSQL, cron jobs
- Add playbooks/app/: Proxmox app stack provisioning and configuration
- Add roles/app_setup: Generic app deployment role (Node.js/systemd)
- Add roles/base_os: Base OS hardening role
- Enhance roles/proxmox_vm: Split LXC/KVM tasks, improve error handling
- Add IP uniqueness validation: Preflight check for duplicate IPs within projects
- Add Proxmox-side IP conflict detection: Check existing LXC net0 configs
- Update inventories/production/group_vars/all/main.yml: Add pote project config
- Add vault.example.yml: Template for POTE secrets (git key, DB, SMTP)
- Update .gitignore: Exclude deploy keys, backup files, and other secrets
- Update documentation: README, role docs, execution flow guides

Security:
- All secrets stored in encrypted vault.yml (never committed in plaintext)
- Deploy keys excluded via .gitignore
- IP conflict guardrails prevent accidental duplicate IP assignments
2025-12-28 20:54:50 -05:00
49 changed files with 2481 additions and 337 deletions

10
.gitignore vendored
View File

@ -6,6 +6,16 @@
*.tmp
*.bak
*~
vault.yml.bak.*
# Deploy keys and SSH private keys - NEVER commit these!
*_deploy_key
*_deploy_key.pub
*.pem
*.key
id_rsa
id_ed25519
id_ecdsa
# Python bytecode
__pycache__/

View File

@ -16,6 +16,7 @@ PLAYBOOK_LOCAL := playbooks/local.yml
PLAYBOOK_MAINTENANCE := playbooks/maintenance.yml
PLAYBOOK_TAILSCALE := playbooks/tailscale.yml
PLAYBOOK_PROXMOX := playbooks/infrastructure/proxmox-vm.yml
PLAYBOOK_PROXMOX_INFO := playbooks/app/proxmox_info.yml
# Collection and requirement paths
COLLECTIONS_REQ := collections/requirements.yml
@ -152,6 +153,18 @@ test-syntax: ## Run comprehensive syntax and validation checks
fi; \
done
@echo ""
@echo "$(YELLOW)App Project Playbooks:$(RESET)"
@for playbook in playbooks/app/site.yml playbooks/app/provision_vms.yml playbooks/app/configure_app.yml playbooks/app/ssh_client_config.yml; do \
if [ -f "$$playbook" ]; then \
printf " %-25s " "$$playbook"; \
if ansible-playbook "$$playbook" --syntax-check >/dev/null 2>&1; then \
echo "$(GREEN)✓ OK$(RESET)"; \
else \
echo "$(RED)✗ FAIL$(RESET)"; \
fi; \
fi; \
done
@echo ""
@echo "$(YELLOW)Role Test Playbooks:$(RESET)"
@for test_playbook in roles/*/tests/test.yml; do \
if [ -f "$$test_playbook" ]; then \
@ -195,10 +208,14 @@ test-syntax: ## Run comprehensive syntax and validation checks
@for yaml_file in inventories/production/group_vars/all/main.yml; do \
if [ -f "$$yaml_file" ]; then \
printf " %-25s " "$$yaml_file (YAML)"; \
if python3 -c "import yaml; yaml.safe_load(open('$$yaml_file'))" >/dev/null 2>&1; then \
echo "$(GREEN)✓ OK$(RESET)"; \
if python3 -c "import yaml" >/dev/null 2>&1; then \
if python3 -c "import yaml; yaml.safe_load(open('$$yaml_file'))" >/dev/null 2>&1; then \
echo "$(GREEN)✓ OK$(RESET)"; \
else \
echo "$(RED)✗ FAIL$(RESET)"; \
fi; \
else \
echo "$(RED)✗ FAIL$(RESET)"; \
echo "$(YELLOW)⚠ Skipped (PyYAML not installed)$(RESET)"; \
fi; \
fi; \
done
@ -528,6 +545,42 @@ monitoring: ## Install monitoring tools on all machines
$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_DEV) --tags monitoring
@echo "$(GREEN)✓ Monitoring installation complete$(RESET)"
# Read-only query of Proxmox guest (LXC/VM) state; changes nothing.
# Optional filters are translated into ansible extra vars:
#   PROJECT -> app_project, ALL=true -> proxmox_info_all, TYPE -> proxmox_info_type
.PHONY: proxmox-info
proxmox-info: ## Show Proxmox VM/LXC info (usage: make proxmox-info [PROJECT=projectA] [ALL=true] [TYPE=lxc|qemu|all])
	@echo "$(YELLOW)Querying Proxmox guest info...$(RESET)"
	@EXTRA=""; \
	if [ -n "$(PROJECT)" ]; then EXTRA="$$EXTRA -e app_project=$(PROJECT)"; fi; \
	if [ "$(ALL)" = "true" ]; then EXTRA="$$EXTRA -e proxmox_info_all=true"; fi; \
	if [ -n "$(TYPE)" ]; then EXTRA="$$EXTRA -e proxmox_info_type=$(TYPE)"; fi; \
	$(ANSIBLE_PLAYBOOK) $(PLAYBOOK_PROXMOX_INFO) $$EXTRA
# Provision the Proxmox guests for one app project (Proxmox API side only).
# Guard uses ifeq+strip rather than ifndef so that an explicitly empty
# `make app-provision PROJECT=` is also rejected at run time.
.PHONY: app-provision
app-provision: ## Provision app project containers/VMs on Proxmox (usage: make app-provision PROJECT=projectA)
ifeq ($(strip $(PROJECT)),)
	@echo "$(RED)Error: PROJECT parameter required$(RESET)"
	@echo "Usage: make app-provision PROJECT=projectA"
	@exit 1
endif
	@echo "$(YELLOW)Provisioning app project guests on Proxmox: $(PROJECT)$(RESET)"
	$(ANSIBLE_PLAYBOOK) playbooks/app/provision_vms.yml -e app_project=$(PROJECT)
# Configure OS + application on the already-provisioned guests of one project
# (SSH into guests; no Proxmox API changes). Guard uses ifeq+strip rather than
# ifndef so an explicitly empty `PROJECT=` is also rejected.
.PHONY: app-configure
app-configure: ## Configure OS + app on project guests (usage: make app-configure PROJECT=projectA)
ifeq ($(strip $(PROJECT)),)
	@echo "$(RED)Error: PROJECT parameter required$(RESET)"
	@echo "Usage: make app-configure PROJECT=projectA"
	@exit 1
endif
	@echo "$(YELLOW)Configuring app project guests: $(PROJECT)$(RESET)"
	$(ANSIBLE_PLAYBOOK) playbooks/app/configure_app.yml -e app_project=$(PROJECT)
# End-to-end run: provision then configure one app project via the combined
# site playbook. Guard uses ifeq+strip rather than ifndef so an explicitly
# empty `PROJECT=` is also rejected.
.PHONY: app
app: ## Provision + configure app project (usage: make app PROJECT=projectA)
ifeq ($(strip $(PROJECT)),)
	@echo "$(RED)Error: PROJECT parameter required$(RESET)"
	@echo "Usage: make app PROJECT=projectA"
	@exit 1
endif
	@echo "$(YELLOW)Provisioning + configuring app project: $(PROJECT)$(RESET)"
	$(ANSIBLE_PLAYBOOK) playbooks/app/site.yml -e app_project=$(PROJECT)
test-connectivity: ## Test host connectivity with detailed diagnostics and recommendations
@echo "$(YELLOW)Testing host connectivity...$(RESET)"
@if [ -f "test_connectivity.py" ]; then \

218
README.md
View File

@ -1,178 +1,80 @@
# Ansible Infrastructure Management
Comprehensive infrastructure automation for development environments, server management, and VM provisioning.
Ansible automation for development machines, service hosts, and **Proxmox-managed guests** (LXC-first, with a path for KVM VMs).
## 📊 **Current Status**
### ✅ **Completed Infrastructure**
- **Core System**: Base packages, SSH hardening, user management
- **Development Environment**: Git, Node.js, Python, Docker, modern CLI tools
- **Shell Configuration**: Zsh + Oh My Zsh + Powerlevel10k + plugins
- **Applications**: VS Code, Cursor, Brave, LibreOffice, desktop tools
- **Monitoring**: System monitoring tools + custom scripts (`sysinfo`, `netinfo`)
- **VPN Mesh**: Tailscale integration with automated auth keys
- **Security**: UFW firewall, fail2ban, SSH hardening
- **Maintenance**: Automated package updates and system cleanup
### 🎯 **Next Priorities**
1. **Enhanced monitoring**: Grafana + Prometheus dashboard
2. **Security hardening**: ClamAV antivirus, Lynis auditing, vulnerability scanning
3. **Centralized logging**: ELK stack for log aggregation
4. **CI/CD pipeline**: GitLab Runner or Jenkins integration
5. **Advanced security**: Intrusion detection, automated patching
## 🚀 Quick Start
## Quick start
```bash
# Install dependencies
# Install Python deps + Ansible collections
make bootstrap
# Set up secrets management
make create-vault
# Edit secrets (Proxmox credentials, SSH public key, etc.)
make edit-group-vault
# Test configuration (comprehensive)
make test
# Deploy to all hosts (dry run first)
make check
make apply
# Validate the repo
make test-syntax
```
## 📚 Documentation
## Proxmox app projects (LXC-first)
### Getting Started
- [**Initial Setup Guide**](docs/guides/setup.md) - First-time setup instructions
- [**Ansible Vault Guide**](docs/guides/vault.md) - Managing secrets securely
- [**Tailscale VPN Setup**](docs/guides/tailscale.md) - Mesh networking configuration
This repo can provision and configure **dev/qa/prod guests per application project** using the `app_projects` model.
### Reference
- [**Installed Applications**](docs/reference/applications.md) - Complete software inventory
- [**Makefile Commands**](docs/reference/makefile.md) - All available make targets
- [**Architecture Overview**](docs/reference/architecture.md) - System design and structure
- **Configure projects**: `inventories/production/group_vars/all/main.yml` (`app_projects`)
- **Configure secrets**: `inventories/production/group_vars/all/vault.yml` (encrypted)
- **Run end-to-end**:
## 🏗️ Project Structure
```bash
make app PROJECT=projectA
```
Other useful entry points:
- **Provision only**: `make app-provision PROJECT=projectA`
- **Configure only**: `make app-configure PROJECT=projectA`
- **Info / safety**: `make proxmox-info [PROJECT=projectA] [ALL=true] [TYPE=lxc|qemu|all]`
Safety notes:
- **IP conflict precheck**: provisioning fails if the target IP responds
(override with `-e allow_ip_conflicts=true` only if you really mean it).
- **VMID/CTID collision guardrail**: provisioning fails if the VMID exists but the guest name doesn't match
(override with `-e allow_vmid_collision=true` only if you really mean it).
- **No destructive playbooks**: this repo intentionally does **not** ship “destroy/decommission” automation.
Docs:
- `docs/guides/app_stack_proxmox.md`
- `docs/guides/app_stack_execution_flow.md`
## Project structure (relevant paths)
```
ansible/
├── Makefile # Task automation
├── ansible.cfg # Ansible configuration
├── hosts # Inventory file
├── collections/
│ └── requirements.yml # Galaxy dependencies
├── group_vars/ # Global variables
│ ├── all.yml
│ └── all/vault.yml # Encrypted secrets
├── host_vars/ # Host-specific configs
├── roles/ # Ansible roles
│ ├── base/ # Core system setup
│ ├── development/ # Dev tools
│ ├── docker/ # Container platform
│ ├── monitoring/ # System monitoring
│ ├── tailscale/ # VPN networking
│ └── ... # Additional roles
├── Makefile
├── ansible.cfg
├── collections/requirements.yml
├── inventories/production/
│ ├── hosts
│ ├── group_vars/all/
│ │ ├── main.yml
│ │ ├── vault.yml
│ │ └── vault.example.yml
│ └── host_vars/
├── playbooks/
│ ├── dev-playbook.yml # Development setup
│ ├── local-playbook.yml # Local machine
│ ├── maintenance-playbook.yml
│ └── tailscale-playbook.yml
└── docs/ # Documentation
├── guides/ # How-to guides
└── reference/ # Technical reference
│ ├── app/
│ │ ├── site.yml
│ │ ├── provision_vms.yml
│ │ ├── configure_app.yml
│ │ └── proxmox_info.yml
│ └── site.yml
└── roles/
├── proxmox_vm/
├── base_os/
├── app_setup/
└── pote/
```
## 🎯 Key Features
## Documentation
### Infrastructure Management
- **Automated Provisioning**: Proxmox VM creation and configuration
- **Configuration Management**: Consistent setup across all machines
- **Network Security**: Tailscale VPN mesh networking
- **System Maintenance**: Automated updates and cleanup
### Development Environment
- **Shell Environment**: Zsh + Oh My Zsh + Powerlevel10k
- **Container Platform**: Docker CE with Compose
- **Development Tools**: Node.js, Python, Git, build tools
- **Code Editors**: VS Code, Cursor IDE
### Security & Monitoring
- **SSH Hardening**: Modern crypto, key-only auth, fail2ban
- **Firewall**: UFW with sensible defaults
- **Monitoring Tools**: btop, iotop, nethogs, custom dashboards
## 🧪 Testing & Validation
### Comprehensive Testing
```bash
make test # Full test suite (lint + syntax + validation)
make test-syntax # Syntax and configuration validation only
make lint # Ansible-lint only
```
### Testing Coverage
- **Playbook syntax**: All main playbooks and infrastructure playbooks
- **Role validation**: All role test playbooks
- **Configuration files**: YAML and INI file validation
- **Documentation**: Markdown syntax and link checking (installed via `make bootstrap`)
- **Linting**: Full Ansible best practices validation
## 🖥️ Managed Hosts
| Host | Type | OS | Purpose |
|------|------|-----|---------|
| dev01 | Physical | Debian | Primary development |
| bottom | Physical | Debian | Secondary development |
| debianDesktopVM | VM | Debian | Desktop environment |
| giteaVM | VM | Alpine | Git repository hosting |
| portainerVM | VM | Alpine | Container management |
| homepageVM | VM | Debian | Service dashboard |
## 🔧 Common Tasks
```bash
# System Maintenance
make maintenance # Update all systems
make maintenance HOST=dev01 # Update specific host
# Development Setup
make docker # Install Docker
make shell # Configure shell
make apps # Install applications
# Network & Security
make tailscale # Deploy VPN
make security # Security hardening
make monitoring # Deploy monitoring
# Infrastructure
make create-vm # Create new VM
make status # Check connectivity
make facts # Gather system info
```
## 🛠️ Requirements
### Control Machine (where you run Ansible)
- Python 3.x with `pipx` (recommended) or `pip3`
- Node.js and `npm` (for documentation testing)
- SSH access to target hosts
- Ansible Vault password (for secrets)
### Target Hosts
- SSH server running
- Python 3.x
- `sudo` access for the Ansible user
### Dependency Management
All project dependencies are managed through standard requirements files:
- **`requirements.txt`** - Python packages (ansible, ansible-lint, etc.)
- **`package.json`** - Node.js packages (markdown tools)
- **`collections/requirements.yml`** - Ansible collections
**Setup**: Run `make bootstrap` to install all dependencies automatically.
## 📝 Contributing
1. Test changes with `make check` (dry run)
2. Follow existing patterns and naming conventions
3. Update documentation for new features
4. Encrypt sensitive data with Ansible Vault
- **Guides**: `docs/guides/`
- **Reference**: `docs/reference/`

View File

@ -1,4 +1,6 @@
---
# Collections required for this repo.
# Install with: ansible-galaxy collection install -r collections/requirements.yml
collections:
- name: community.general
version: ">=6.0.0"

9
configure_app.yml Normal file
View File

@ -0,0 +1,9 @@
---
# Repo-root wrapper that delegates to the app-stack configuration playbook,
# so the configure phase can be run without the playbooks/app/ path.
# Usage:
#   ansible-playbook -i inventories/production configure_app.yml -e app_project=projectA
- name: Configure app project guests
  import_playbook: playbooks/app/configure_app.yml

View File

@ -0,0 +1,173 @@
# App stack execution flow (what happens when you run it)
This document describes **exactly** what Ansible runs and what it changes when you execute the Proxmox app stack playbooks.
## Entry points
- Recommended end-to-end run:
- `playbooks/app/site.yml`
- Repo-root wrappers (equivalent):
- `site.yml` (imports `playbooks/site.yml`, and you can `--tags app`)
- `provision_vms.yml` (imports `playbooks/app/provision_vms.yml`)
- `configure_app.yml` (imports `playbooks/app/configure_app.yml`)
## High-level flow
When you run `playbooks/app/site.yml`, it imports two playbooks in order:
1. `playbooks/app/provision_vms.yml` (**Proxmox API changes happen here**)
2. `playbooks/app/configure_app.yml` (**SSH into guests and configure OS/app**)
## Variables that drive everything
All per-project/per-env inputs come from:
- `inventories/production/group_vars/all/main.yml``app_projects`
Each `app_projects.<project>.envs.<env>` contains:
- `name` (container hostname / inventory host name)
- `vmid` (Proxmox CTID)
- `ip` (static IP in CIDR form, e.g. `10.0.10.101/24`)
- `gateway` (e.g. `10.0.10.1`)
- `branch` (`dev`, `qa`, `main`)
- `env_vars` (key/value map written to `/srv/app/.env.<env>`)
Proxmox connection variables are also read from `inventories/production/group_vars/all/main.yml` but are usually vault-backed:
- `proxmox_host: "{{ vault_proxmox_host }}"`
- `proxmox_user: "{{ vault_proxmox_user }}"`
- `proxmox_node: "{{ vault_proxmox_node | default('pve') }}"`
## Phase 1: Provisioning via Proxmox API
### File chain
`playbooks/app/site.yml` imports `playbooks/app/provision_vms.yml`, which does:
- Validates `app_project` exists (if you passed one)
- Loops projects → includes `playbooks/app/provision_one_guest.yml`
- Loops envs inside the project → includes `playbooks/app/provision_one_env.yml`
### Preflight IP safety check
In `playbooks/app/provision_one_env.yml`:
- It runs `ping` against the target IP.
- If the IP responds, the play **fails** to prevent accidental duplicate-IP provisioning.
- You can override the guard (not recommended) with `-e allow_ip_conflicts=true`.
### What it creates/updates in Proxmox
In `playbooks/app/provision_one_env.yml` it calls role `roles/proxmox_vm` with LXC variables.
`roles/proxmox_vm/tasks/main.yml` dispatches:
- If `proxmox_guest_type == 'lxc'` → includes `roles/proxmox_vm/tasks/lxc.yml`
`roles/proxmox_vm/tasks/lxc.yml` performs:
1. **Build CT network config**
- Produces a `netif` dict like:
- `net0: name=eth0,bridge=vmbr0,firewall=1,ip=<CIDR>,gw=<GW>`
2. **Create/update the container**
- Uses `community.proxmox.proxmox` with:
- `state: present`
- `update: true` (so re-runs reconcile config)
- `vmid`, `hostname`, `ostemplate`, CPU/mem/swap, rootfs sizing, `netif`
- `pubkey` and optionally `password` for initial root access
3. **Start the container**
- Ensures `state: started` (if `lxc_start_after_create: true`)
4. **Wait for SSH**
- `wait_for: host=<ip> port=22`
### Dynamic inventory creation
Still in `playbooks/app/provision_one_env.yml`, it calls `ansible.builtin.add_host` so the guests become available to later plays:
- Adds the guest to groups:
- `app_all`
- `app_<project>_all`
- `app_<project>_<env>`
- Sets:
- `ansible_host` to the IP (without CIDR)
- `ansible_user: root` (bootstrap user for first config)
- `app_project`, `app_env` facts
## Phase 2: Configure OS + app on the guests
`playbooks/app/configure_app.yml` contains two plays:
### Play A: Build dynamic inventory (localhost)
This play exists so you can run `configure_app.yml` even if you didn't run provisioning in the same Ansible invocation.
- It loops over projects/envs from `app_projects`
- Adds hosts to:
- `app_all`, `app_<project>_all`, `app_<project>_<env>`
- Uses:
- `ansible_user: "{{ app_bootstrap_user | default('root') }}"`
### Play B: Configure the hosts (SSH + sudo)
Targets:
- If you pass `-e app_project=projectA``hosts: app_projectA_all`
- Otherwise → `hosts: app_all`
Tasks executed on each guest:
1. **Resolve effective project/env variables**
- `project_def = app_projects[app_project]`
- `env_def = app_projects[app_project].envs[app_env]`
2. **Role: `base_os`** (`roles/base_os/tasks/main.yml`)
- Updates apt cache
- Installs baseline packages (git/curl/nodejs/npm/ufw/etc.)
- Creates `appuser` (passwordless sudo)
- Adds your SSH public key to `appuser`
- Enables UFW and allows:
- SSH (22)
- backend port (default `3001`, overridable per project)
- frontend port (default `3000`, overridable per project)
3. **Role: `app_setup`** (`roles/app_setup/tasks/main.yml`)
- Creates:
- `/srv/app`
- `/srv/app/backend`
- `/srv/app/frontend`
- Writes the env file:
- `/srv/app/.env.<dev|qa|prod>` from template `roles/app_setup/templates/env.j2`
- Writes the deploy script:
- `/usr/local/bin/deploy_app.sh` from `roles/app_setup/templates/deploy_app.sh.j2`
- Script does:
- `git clone` if missing
- `git checkout/pull` correct branch
- runs backend install + migrations
- runs frontend install + build
- restarts systemd services
- Writes systemd units:
- `/etc/systemd/system/app-backend.service` from `app-backend.service.j2`
- `/etc/systemd/system/app-frontend.service` from `app-frontend.service.j2`
- Reloads systemd and enables/starts both services
## What changes on first run vs re-run
- **Provisioning**:
- First run: creates CTs in Proxmox, sets static IP config, starts them.
- Re-run: reconciles settings because `update: true` is used.
- **Configuration**:
- Mostly idempotent (directories/templates/users/firewall/services converge).
## Common “before you run” checklist
- Confirm `app_projects` has correct IPs/CTIDs/branches:
- `inventories/production/group_vars/all/main.yml`
- Ensure vault has Proxmox + SSH key material:
- `inventories/production/group_vars/all/vault.yml`
- Reference template: `inventories/production/group_vars/all/vault.example.yml`

View File

@ -0,0 +1,90 @@
# Proxmox App Projects (LXC-first)
This guide documents the **modular app-project stack** that provisions Proxmox guests (dev/qa/prod) and configures a full-stack app layout on them.
## What you get
- Proxmox provisioning via API (currently **LXC**; VM support remains via existing `roles/proxmox_vm` KVM path)
- A deployment user (`appuser`) with your SSH key
- `/srv/app/backend` and `/srv/app/frontend`
- Env file `/srv/app/.env.<dev|qa|prod>`
- `/usr/local/bin/deploy_app.sh` to pull the right branch and restart services
- systemd services:
- `app-backend.service`
- `app-frontend.service`
## Where to configure projects
Edit:
- `inventories/production/group_vars/all/main.yml`
Under `app_projects`, define projects like:
- `projectA.repo_url`
- `projectA.envs.dev|qa|prod.ip/gateway/branch`
- `projectA.guest_defaults` (cores/memory/rootfs sizing)
- `projectA.deploy.*` (install/build/migrate/start commands)
Adding **projectB** is just adding another top-level `app_projects.projectB` entry.
## Proxmox credentials (vault)
This repo already expects Proxmox connection vars in vault (see existing Proxmox playbooks). Ensure these exist in:
- `inventories/production/group_vars/all/vault.yml` (encrypted)
Common patterns:
- `vault_proxmox_host`: `10.0.10.201`
- `vault_proxmox_user`: e.g. `root@pam` or `ansible@pve`
- `vault_proxmox_node`: e.g. `pve`
- Either:
- `vault_proxmox_password`, or
- `vault_proxmox_token` + `vault_proxmox_token_id`
## Debian LXC template
The LXC provisioning uses `lxc_ostemplate`, defaulting to a Debian 12 template string like:
`local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst`
If your Proxmox has a different template filename, change `lxc_ostemplate` in `inventories/production/group_vars/all/main.yml`.
## Running it
Provision + configure one project:
```bash
ansible-playbook -i inventories/production playbooks/app/site.yml -e app_project=projectA
```
Provision + configure all projects in `app_projects`:
```bash
ansible-playbook -i inventories/production playbooks/app/site.yml
```
Only provisioning (Proxmox API):
```bash
ansible-playbook -i inventories/production playbooks/app/provision_vms.yml -e app_project=projectA
```
Only OS/app configuration:
```bash
ansible-playbook -i inventories/production playbooks/app/configure_app.yml -e app_project=projectA
```
## Optional: SSH aliases on your workstation
To write `~/.ssh/config` entries (disabled by default):
```bash
ansible-playbook -i inventories/production playbooks/app/ssh_client_config.yml -e manage_ssh_config=true -e app_project=projectA
```
This creates aliases like `projectA-dev`, `projectA-qa`, `projectA-prod`.

View File

@ -129,7 +129,7 @@ vault_ssh_public_key: "ssh-ed25519 AAAA..."
## Step 7: Configure Variables
### Global Settings
Edit `group_vars/all.yml`:
Edit `inventories/production/group_vars/all/main.yml`:
```yaml
# Timezone and locale
timezone: "America/New_York" # Your timezone
@ -145,7 +145,7 @@ ssh_permit_root_login: "no"
```
### Host-Specific Settings
Create/edit `host_vars/hostname.yml` for host-specific configuration.
Create/edit `inventories/production/host_vars/<hostname>.yml` for host-specific configuration.
## Step 8: Test Configuration
@ -159,7 +159,7 @@ make check
make check HOST=dev01
# Check specific role
ansible-playbook dev-playbook.yml --check --tags docker
ansible-playbook playbooks/development.yml --check --tags docker
```
## Step 9: Deploy
@ -208,7 +208,7 @@ ansible dev -m shell -a "tailscale status"
### Vault Password Issues
- Check vault password file exists and has correct permissions
- Verify password is correct: `ansible-vault view group_vars/all/vault.yml`
- Verify password is correct: `ansible-vault view inventories/production/group_vars/all/vault.yml`
### Python Not Found
- Install Python on target: `sudo apt install python3`

View File

@ -46,21 +46,21 @@ make tailscale-status
make tailscale-dev
# Specific hosts
ansible-playbook tailscale-playbook.yml --limit "dev01,bottom"
ansible-playbook playbooks/tailscale.yml --limit "dev01,bottom"
```
### Manual Installation
```bash
# With custom auth key (not recommended - use vault instead)
ansible-playbook tailscale-playbook.yml -e "tailscale_auth_key=your-key"
ansible-playbook playbooks/tailscale.yml -e "tailscale_auth_key=your-key"
# As part of existing playbooks
ansible-playbook dev-playbook.yml --tags tailscale
ansible-playbook playbooks/development.yml --tags tailscale
```
## Configuration
### Global Settings (`group_vars/all.yml`)
### Global Settings (`inventories/production/group_vars/all/main.yml`)
```yaml
tailscale_auth_key: "{{ vault_tailscale_auth_key }}" # From vault
tailscale_accept_routes: true # Accept subnet routes
@ -68,7 +68,7 @@ tailscale_accept_dns: true # Accept DNS settings
tailscale_ssh: true # Enable SSH over Tailscale
```
### Host-Specific Settings (`host_vars/hostname.yml`)
### Host-Specific Settings (`inventories/production/host_vars/<hostname>.yml`)
```yaml
tailscale_hostname: "custom-name" # Override hostname
tailscale_advertise_routes: "192.168.1.0/24" # Share local subnet
@ -100,7 +100,7 @@ sudo tailscale up
### Reset Connection
```bash
ansible-playbook tailscale-playbook.yml -e "tailscale_reset=true"
ansible-playbook playbooks/tailscale.yml -e "tailscale_reset=true"
```
## Security Best Practices
@ -119,7 +119,7 @@ The role automatically detects OS and uses appropriate package manager.
## How It Works
1. **Playbook runs** → looks for `tailscale_auth_key`
2. **Checks `all.yml`** → finds `{{ vault_tailscale_auth_key }}`
2. **Checks inventory group vars** → finds `{{ vault_tailscale_auth_key }}`
3. **Decrypts vault** → retrieves actual auth key
4. **Installs Tailscale** → configures with your settings
5. **Connects to network** → machine appears in admin console

View File

@ -6,7 +6,7 @@ Ansible Vault encrypts sensitive data like passwords and API keys while keeping
### Create Vault
```bash
make create-vault
make edit-group-vault
```
### Add Secrets
@ -38,32 +38,31 @@ database_password: "{{ vault_db_password }}"
## File Structure
```
group_vars/
├── all.yml # Plain text configuration
└── all/
└── vault.yml # Encrypted secrets (created by make create-vault)
host_vars/
├── dev01.yml # Host-specific plain text
└── dev01/
└── vault.yml # Host-specific secrets
inventories/production/
├── group_vars/
└── all/
│ ├── main.yml # Plain text configuration
│ └── vault.yml # Encrypted secrets (edit with make edit-group-vault)
└── host_vars/
├── dev01.yml # Host-specific plain text
└── dev01/
└── vault.yml # Host-specific secrets (edit with make edit-vault HOST=dev01)
```
## Common Commands
```bash
# Create new vault
make create-vault
# Edit group vault (production inventory)
make edit-group-vault
# Edit existing vault
make edit-vault # Global vault
make edit-vault HOST=dev01 # Host-specific vault
# Edit host-specific vault
make edit-vault HOST=dev01
# View decrypted contents
ansible-vault view group_vars/all/vault.yml
ansible-vault view inventories/production/group_vars/all/vault.yml
# Change vault password
ansible-vault rekey group_vars/all/vault.yml
ansible-vault rekey inventories/production/group_vars/all/vault.yml
```
## Password Management

View File

@ -97,7 +97,7 @@ Complete inventory of applications and tools deployed by Ansible playbooks.
## Installation by Playbook
### dev-playbook.yml
### `playbooks/development.yml`
Installs all roles for development machines:
- All system tools
- Development environment
@ -107,14 +107,14 @@ Installs all roles for development machines:
- Monitoring tools
- Tailscale VPN
### local-playbook.yml
### `playbooks/local.yml`
Installs for local machine management:
- Core system tools
- Shell environment
- Development basics
- Selected applications
### maintenance-playbook.yml
### `playbooks/maintenance.yml`
Maintains existing installations:
- System updates
- Package cleanup

View File

@ -80,7 +80,7 @@ Technical architecture and design of the Ansible infrastructure management syste
### Core Playbooks
```yaml
dev-playbook.yml # Development environment setup
playbooks/development.yml # Development environment setup
├── roles/maintenance # System updates
├── roles/base # Core packages
├── roles/ssh # SSH hardening
@ -93,20 +93,24 @@ dev-playbook.yml # Development environment setup
├── roles/tailscale # VPN setup
├── roles/monitoring # Monitoring tools
local-playbook.yml # Local machine
playbooks/local.yml # Local machine
├── roles/base
├── roles/shell
├── roles/development
└── roles/tailscale
maintenance-playbook.yml # System maintenance
playbooks/maintenance.yml # System maintenance
└── roles/maintenance
tailscale-playbook.yml # VPN deployment
playbooks/tailscale.yml # VPN deployment
└── roles/tailscale
proxmox-create-vm.yml # VM provisioning
playbooks/infrastructure/proxmox-vm.yml # KVM VM provisioning (controller VM, etc.)
└── roles/proxmox_vm
playbooks/app/site.yml # Proxmox app stack (LXC-first)
├── playbooks/app/provision_vms.yml # Proxmox API provisioning (LXC/KVM)
└── playbooks/app/configure_app.yml # Guest OS + app configuration over SSH
```
### Role Dependencies
@ -146,9 +150,9 @@ tailscale
## Data Flow
### Configuration Management
1. **Variables**group_vars/all.yml (global)
2. **Secrets** → group_vars/all/vault.yml (encrypted)
3. **Host Config**host_vars/hostname.yml (specific)
1. **Variables**`inventories/production/group_vars/all/main.yml`
2. **Secrets**`inventories/production/group_vars/all/vault.yml` (encrypted)
3. **Host Config**`inventories/production/host_vars/<hostname>.yml`
4. **Role Defaults** → roles/*/defaults/main.yml
5. **Tasks** → roles/*/tasks/main.yml
6. **Templates** → roles/*/templates/*.j2

View File

@ -58,6 +58,10 @@ Complete reference for all available `make` commands in the Ansible project.
| Command | Description | Usage |
|---------|-------------|-------|
| `create-vm` | Create Ansible controller VM on Proxmox | `make create-vm` |
| `proxmox-info` | Show Proxmox guest info (LXC/VM) | `make proxmox-info [PROJECT=projectA] [ALL=true] [TYPE=lxc\|qemu\|all]` |
| `app-provision` | Provision app project guests on Proxmox | `make app-provision PROJECT=projectA` |
| `app-configure` | Configure OS + app on project guests | `make app-configure PROJECT=projectA` |
| `app` | Provision + configure app project guests | `make app PROJECT=projectA` |
| `ping` | Ping hosts with colored output | `make ping [GROUP=dev] [HOST=dev01]` |
| `facts` | Gather facts from all hosts | `make facts` |
| `test-connectivity` | Test network and SSH access | `make test-connectivity` |
@ -69,6 +73,7 @@ Complete reference for all available `make` commands in the Ansible project.
| `copy-ssh-key` | Copy SSH key to specific host | `make copy-ssh-key HOST=giteaVM` |
| `create-vault` | Create encrypted vault file | `make create-vault` |
| `edit-vault` | Edit encrypted host vars | `make edit-vault HOST=dev01` |
| `edit-group-vault` | Edit encrypted group vars (production inventory) | `make edit-group-vault` |
## Utility Commands

View File

@ -30,3 +30,269 @@ tailscale_accept_routes: true
tailscale_accept_dns: true
tailscale_ssh: false
tailscale_hostname: "{{ inventory_hostname }}"
# -----------------------------------------------------------------------------
# Proxmox + modular app projects (LXC-first)
#
# This repo can manage many independent apps ("projects"). Each project defines
# its own dev/qa/prod guests (IPs/VMIDs/branches) under `app_projects`.
#
# Usage examples:
# - Run one project: ansible-playbook -i inventories/production playbooks/app/site.yml -e app_project=projectA
# - Run all projects: ansible-playbook -i inventories/production playbooks/app/site.yml
# -----------------------------------------------------------------------------
# Proxmox API connection (keep secrets in vault)
proxmox_host: "{{ vault_proxmox_host }}"
proxmox_user: "{{ vault_proxmox_user }}"
proxmox_node: "{{ vault_proxmox_node | default('pve') }}"
proxmox_api_port: "{{ vault_proxmox_api_port | default(8006) }}"
# Proxmox commonly uses a self-signed cert; keep validation off by default.
proxmox_validate_certs: false
# Prefer API token auth (store in vault):
# - proxmox_token_id: "ansible@pve!token-name"
# - vault_proxmox_token: "secret"
proxmox_token_id: "{{ vault_proxmox_token_id | default('') }}"
# Default guest type for new projects. (Later you can set to `kvm` per project/env.)
proxmox_guest_type: lxc
# Proxmox LXC defaults (override per project/env as needed)
lxc_ostemplate: "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
lxc_storage: "local-lvm"
lxc_network_bridge: "vmbr0"
lxc_unprivileged: true
lxc_features_list:
- "keyctl=1"
- "nesting=1"
lxc_start_after_create: true
lxc_nameserver: "1.1.1.1 8.8.8.8"
# Base OS / access defaults
appuser_name: appuser
appuser_shell: /bin/bash
appuser_groups:
- sudo
# Store your workstation public key in vault_ssh_public_key
appuser_ssh_public_key: "{{ vault_ssh_public_key }}"
# App defaults (override per project)
app_backend_port: 3001
app_frontend_port: 3000
# Default Node workflow commands (override per project if your app differs)
app_backend_install_cmd: "npm ci"
app_backend_migrate_cmd: "npm run migrate"
app_backend_start_cmd: "npm start"
app_frontend_install_cmd: "npm ci"
app_frontend_build_cmd: "npm run build"
app_frontend_start_cmd: "npm start"
# Projects definition: add as many projects as you want here.
# Each project has envs (dev/qa/prod) defining name/vmid/ip/gateway/branch and
# optional env_vars (dummy placeholders by default).
#
# -----------------------------------------------------------------------------
# Proxmox VMID/CTID ranges (DEDICATED; avoid collisions)
#
# Proxmox IDs are global. Never reuse IDs across unrelated guests.
# Suggested reservation table (edit to your preference):
# - 9000-9099: pote
# - 9100-9199: punimTagFE
# - 9200-9299: punimTagBE
# - 9300-9399: projectA (example)
# -----------------------------------------------------------------------------
app_projects:
projectA:
description: "Example full-stack app (edit repo_url, IPs, secrets)."
repo_url: "git@github.com:example/projectA.git"
components:
backend: true
frontend: true
# Repo is assumed to contain `backend/` and `frontend/` directories.
repo_dest: "/srv/app"
# Optional overrides for this project
backend_port: "{{ app_backend_port }}"
frontend_port: "{{ app_frontend_port }}"
guest_defaults:
guest_type: "{{ proxmox_guest_type }}"
cores: 2
memory_mb: 2048
swap_mb: 512
rootfs_size_gb: 16
deploy:
backend_install_cmd: "{{ app_backend_install_cmd }}"
backend_migrate_cmd: "{{ app_backend_migrate_cmd }}"
backend_start_cmd: "{{ app_backend_start_cmd }}"
frontend_install_cmd: "{{ app_frontend_install_cmd }}"
frontend_build_cmd: "{{ app_frontend_build_cmd }}"
frontend_start_cmd: "{{ app_frontend_start_cmd }}"
envs:
dev:
name: "projectA-dev"
vmid: 9301
ip: "10.0.10.101/24"
gateway: "10.0.10.1"
branch: "dev"
env_vars:
APP_ENV: "dev"
BACKEND_BASE_URL: "http://10.0.10.101:{{ app_backend_port }}"
FRONTEND_BASE_URL: "http://10.0.10.101:{{ app_frontend_port }}"
SECRET_PLACEHOLDER: "change-me"
qa:
name: "projectA-qa"
vmid: 9302
ip: "10.0.10.102/24"
gateway: "10.0.10.1"
branch: "qa"
env_vars:
APP_ENV: "qa"
BACKEND_BASE_URL: "http://10.0.10.102:{{ app_backend_port }}"
FRONTEND_BASE_URL: "http://10.0.10.102:{{ app_frontend_port }}"
SECRET_PLACEHOLDER: "change-me"
prod:
name: "projectA-prod"
vmid: 9303
ip: "10.0.10.103/24"
gateway: "10.0.10.1"
branch: "main"
pote:
description: "POTE (python/venv + cron) project (edit repo_url, IPs, secrets)."
repo_url: "gitea@10.0.30.169:ilia/POTE.git"
# POTE deploys as a user-owned python app (not /srv/app)
repo_dest: "/home/poteapp/pote"
os_user: "poteapp"
components:
backend: false
frontend: false
guest_defaults:
guest_type: "{{ proxmox_guest_type }}"
cores: 2
memory_mb: 2048
swap_mb: 512
rootfs_size_gb: 16
# POTE-specific optional defaults (override per env if needed)
pote_db_host: "localhost"
pote_db_user: "poteuser"
pote_db_name: "potedb"
pote_smtp_host: "mail.levkin.ca"
pote_smtp_port: 587
envs:
dev:
name: "pote-dev"
vmid: 9001
ip: "10.0.10.114/24"
gateway: "10.0.10.1"
branch: "dev"
qa:
name: "pote-qa"
vmid: 9002
ip: "10.0.10.112/24"
gateway: "10.0.10.1"
branch: "qa"
prod:
name: "pote-prod"
vmid: 9003
ip: "10.0.10.113/24"
gateway: "10.0.10.1"
branch: "main"
punimTagFE:
description: "punimTag frontend-only project (edit repo_url, IPs, secrets)."
repo_url: "git@github.com:example/punimTagFE.git"
repo_dest: "/srv/app"
components:
backend: false
frontend: true
guest_defaults:
guest_type: "{{ proxmox_guest_type }}"
cores: 2
memory_mb: 2048
swap_mb: 512
rootfs_size_gb: 16
deploy:
frontend_install_cmd: "{{ app_frontend_install_cmd }}"
frontend_build_cmd: "{{ app_frontend_build_cmd }}"
frontend_start_cmd: "{{ app_frontend_start_cmd }}"
envs:
dev:
name: "punimTagFE-dev"
vmid: 9101
ip: "10.0.10.121/24"
gateway: "10.0.10.1"
branch: "dev"
env_vars:
APP_ENV: "dev"
SECRET_PLACEHOLDER: "change-me"
qa:
name: "punimTagFE-qa"
vmid: 9102
ip: "10.0.10.122/24"
gateway: "10.0.10.1"
branch: "qa"
env_vars:
APP_ENV: "qa"
SECRET_PLACEHOLDER: "change-me"
prod:
name: "punimTagFE-prod"
vmid: 9103
ip: "10.0.10.123/24"
gateway: "10.0.10.1"
branch: "main"
env_vars:
APP_ENV: "prod"
SECRET_PLACEHOLDER: "change-me"
punimTagBE:
description: "punimTag backend-only project (edit repo_url, IPs, secrets)."
repo_url: "git@github.com:example/punimTagBE.git"
repo_dest: "/srv/app"
components:
backend: true
frontend: false
guest_defaults:
guest_type: "{{ proxmox_guest_type }}"
cores: 2
memory_mb: 2048
swap_mb: 512
rootfs_size_gb: 16
deploy:
backend_install_cmd: "{{ app_backend_install_cmd }}"
backend_migrate_cmd: "{{ app_backend_migrate_cmd }}"
backend_start_cmd: "{{ app_backend_start_cmd }}"
envs:
dev:
name: "punimTagBE-dev"
vmid: 9201
ip: "10.0.10.131/24"
gateway: "10.0.10.1"
branch: "dev"
env_vars:
APP_ENV: "dev"
SECRET_PLACEHOLDER: "change-me"
qa:
name: "punimTagBE-qa"
vmid: 9202
ip: "10.0.10.132/24"
gateway: "10.0.10.1"
branch: "qa"
env_vars:
APP_ENV: "qa"
SECRET_PLACEHOLDER: "change-me"
prod:
name: "punimTagBE-prod"
vmid: 9203
ip: "10.0.10.133/24"
gateway: "10.0.10.1"
branch: "main"
env_vars:
APP_ENV: "prod"
SECRET_PLACEHOLDER: "change-me"

View File

@ -0,0 +1,42 @@
---
# Example vault values for Proxmox app projects.
#
# Copy required keys into your encrypted vault:
# make edit-group-vault
#
# Never commit real secrets unencrypted.
# Proxmox API
vault_proxmox_host: "10.0.10.201"
vault_proxmox_user: "root@pam"
vault_proxmox_node: "pve"
vault_proxmox_password: "CHANGE_ME"
# Optional token auth (recommended if you use it)
# vault_proxmox_token_id: "root@pam!ansible"
# vault_proxmox_token: "CHANGE_ME"
# SSH public key for appuser (workstation key)
vault_ssh_public_key: "ssh-ed25519 AAAA... you@example"
# LXC create bootstrap password (often required by Proxmox)
vault_lxc_root_password: "CHANGE_ME"
# -----------------------------------------------------------------------------
# POTE (python/venv + cron) secrets
# -----------------------------------------------------------------------------
# Private key used for cloning from Gitea (deploy key). Store as a multi-line block.
vault_pote_git_ssh_key: |
-----BEGIN OPENSSH PRIVATE KEY-----
CHANGE_ME
-----END OPENSSH PRIVATE KEY-----
# Environment-specific DB passwords (used by roles/pote)
vault_pote_db_password_dev: "CHANGE_ME"
vault_pote_db_password_qa: "CHANGE_ME"
vault_pote_db_password_prod: "CHANGE_ME"
# SMTP password for reports
vault_pote_smtp_password: "CHANGE_ME"

View File

@ -1,10 +1,47 @@
$ANSIBLE_VAULT;1.1;AES256
36343265643238633236643162613137393331386164306133666537633336633036376433386161
3135366566623235333264386539346364333435373065300a633231633731316633313166346161
30363334613965666634633665363632323966396464633636346533616634393664386566333230
3463666531323866660a666238383331383562313363386639646161653334313661393065343135
33613762653361656633366465306264323935363032353737333935363165346639616330333939
39336538643866366361313838636338643336376365373166376234383838656430623339313162
37353461313263643263376232393138396233366234336333613535366234383661353938663032
65383737343164343431363764333063326230623263323231366232626131306637353361343466
6131
36643038376636383030343730626264613839396462366365633837636130623639393361656634
3238353261633635353662653036393835313963373562390a646535376366656163383632313835
39646666653362336661633736333365343962346432653131613134353361366263373162386631
3134613438626132320a313765343338643535343837306339616564336564303166626164356530
63663363656535303137663431613861343662303664313332626166373463393931323937613230
66333665316331323637663437653339353737653336633864393033336630336438646162643662
31656164363933333036376263303034646366393134636630663631353235373831303264363762
66643865616130306537383836646237613730643133656333666632326538613764383530363363
61386161646637316166303633643665383365346534323939383034613430386362303038313761
36303364396436373466653332303562653038373962616539356633373065643130303036363161
65353163326136383066393332376236386333653532326337613163346334616234643562643265
62316134386365343733636661336130623364386634383965386135616633323132643365613231
34636435333031376136396336316337666161383562343834383865316436633333333065323138
37343865363731303137666330306131373734623637343531623562353332353437646631343363
30393565376435303430396535643165616534313334326462363130626639343038643835336335
33613630336534666163356631353438373462306566376134323536373832643264633365653465
62386363326436623330653430383262653732376235626432656362306363303663623834653664
31373762306539376431353137393664396165396261613364653339373765393863633833396131
36666235666234633430373338323331313531643736656137303937653865303431643164373161
39633238383265396366386230303536613461633431333565353433643935613231333232333063
36643435376165656262623863373039393837643564366531666462376162653630626634663037
39373439336239646131306133663566343734656339346462356662373561306264333364383966
38343463616666613037636335333137633737666166633364343736646232396566373866633531
34303734376137386363373039656565323364333539626630323465666636396465323861333365
35376161663630356132373638333937376164316361303531303637396334306133373237656265
36356532623130323565396531306136363339363437376364343138653139653335343765316365
38313035366137393365316139326236326330386365343665376335313339666231333632333133
32353865626531373462346261653832386234396531653136323162653865303861396233376261
34616232363965313635373833333737336166643734373633313865323066393930666562316136
36373763356365646361656436383463393237623461383531343134373336663763663464336361
38396532383932643065303731663565353366373033353237383538636365323064396531386134
61643964613930373439383032373364316437303239393434376465393639373634663738623461
37386366616333626434363761326361373533306635316164316363393264303633353939613239
37353266303637323139653630663236663633313061306633316139666539376632306630313362
34633834326433646230303634313266303530633236353262633066396462646365623935343161
34393166643666366164313438383939386434366665613330653739383139613732396633383261
33633664303131383163356362316639353064373861343132623565636631333135663034373461
61303031616634333235303066633939643337393862653031323936363932633438303035323238
66323066353737316166383533636661336637303265343937633064626164623462656134333732
33316536336430636636646561626232666633656266326339623732363531326131643764313838
62356537326166346666313930383639386466633432626235373738633833393164646238366465
62373938363739373036666238666433303061633732663565666433333631326432626461353037
39636263636632313431353364386566383134653139393762623562643561616166633035353038
39326462356332616563303462636536636132633933336532383938373030666333363264346632
64643063373830353130613662323131353964313038323735626464313363326364653732323732
3663393964633138376665323435366463623463613237366465

View File

@ -2,6 +2,10 @@
# Primary IPs: Tailscale (100.x.x.x) for remote access
# Fallback IPs: Local network (10.0.x.x) when Tailscale is down
# Usage: ansible_host_fallback is available for manual fallback
#
# NOTE: Proxmox app projects (dev/qa/prod) are provisioned dynamically via
# `playbooks/app/site.yml` (it uses `add_host` based on `app_projects`).
# You generally do NOT need to add project hosts here.
[gitea]
giteaVM ansible_host=10.0.30.169 ansible_user=root
@ -13,7 +17,7 @@ portainerVM ansible_host=10.0.30.69 ansible_user=ladmin
homepageVM ansible_host=10.0.30.12 ansible_user=homepage
[vaultwarden]
vaultwardenVM ansible_host=10.0.10.142 ansible_user=root
vaultwardenVM ansible_host=10.0.10.142 ansible_user=ladmin
[dev]
dev01 ansible_host=10.0.30.105 ansible_user=ladmin

2
package-lock.json generated
View File

@ -13,7 +13,7 @@
"markdownlint-cli2": "^0.18.1"
},
"engines": {
"node": ">=22.0.0",
"node": ">=20.0.0",
"npm": ">=10.0.0"
}
},

View File

@ -0,0 +1,134 @@
---
# Playbook: app/configure_app.yml
# Purpose: Configure OS + app runtime on app project guests created via provision_vms.yml
# Targets: app_all or per-project group created dynamically
# Tags: app, configure
#
# Usage:
# - Run one project: ansible-playbook -i inventories/production playbooks/app/site.yml -e app_project=projectA
# - Run all projects: ansible-playbook -i inventories/production playbooks/app/site.yml
# Play 1: runs on the controller and mirrors provision_vms.yml's add_host
# logic so this playbook can be run standalone (without provisioning first).
- name: Build dynamic inventory from app_projects (so configure can run standalone)
  hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # All project keys when app_project is unset/empty; otherwise just that one.
    selected_projects: >-
      {{
        (app_projects | dict2items | map(attribute='key') | list)
        if (app_project is not defined or app_project | length == 0)
        else [app_project]
      }}
    app_bootstrap_user_default: root
    # If true, configure plays will use vault_lxc_root_password for initial SSH bootstrap.
    bootstrap_with_root_password_default: false
  tasks:
    - name: Validate requested project exists
      ansible.builtin.assert:
        that:
          # Short-circuits when app_project is unset, so fail_msg's
          # {{ app_project }} only renders when the var is defined.
          - app_project is not defined or app_project in app_projects
        fail_msg: "Requested app_project={{ app_project }} does not exist in app_projects."

    - name: Add each project/env host (by static IP) to dynamic inventory
      ansible.builtin.add_host:
        # Guest name falls back to "<project>-<env>" when not set in app_projects.
        name: "{{ app_projects[item.0].envs[item.1].name | default(item.0 ~ '-' ~ item.1) }}"
        groups:
          - "app_all"
          - "app_{{ item.0 }}_all"
          - "app_{{ item.0 }}_{{ item.1 }}"
        # Strip the CIDR suffix ("10.0.10.101/24" -> "10.0.10.101") for SSH.
        ansible_host: "{{ (app_projects[item.0].envs[item.1].ip | string).split('/')[0] }}"
        ansible_user: "{{ app_bootstrap_user | default(app_bootstrap_user_default) }}"
        # Password auth is only injected when bootstrap is explicitly enabled AND
        # a non-empty vault password exists; otherwise `omit` leaves key auth.
        ansible_password: >-
          {{
            vault_lxc_root_password
            if ((bootstrap_with_root_password | default(bootstrap_with_root_password_default) | bool) and (vault_lxc_root_password | default('') | length) > 0)
            else omit
          }}
        app_project: "{{ item.0 }}"
        app_env: "{{ item.1 }}"
      # Cartesian product: every selected project x the three fixed env names.
      loop: "{{ selected_projects | product(['dev', 'qa', 'prod']) | list }}"
      when:
        - app_projects[item.0] is defined
        - app_projects[item.0].envs[item.1] is defined
        # Skip envs with no configured IP (nothing to SSH to).
        - (app_projects[item.0].envs[item.1].ip | default('')) | length > 0
# Play 2: targets the hosts added by play 1 (or by provision_vms.yml).
# Every targeted host is expected to carry app_project/app_env host vars,
# which were set via add_host.
- name: Configure app guests (base OS + app setup)
  hosts: >-
    {{
      ('app_' ~ app_project ~ '_all')
      if (app_project is defined and app_project | length > 0)
      else 'app_all'
    }}
  become: true
  gather_facts: true
  tasks:
    # Per-host shortcuts into the app_projects structure.
    - name: Build project/env effective variables
      ansible.builtin.set_fact:
        project_def: "{{ app_projects[app_project] }}"
        env_def: "{{ app_projects[app_project].envs[app_env] }}"
      when: app_project is defined and app_env is defined

    - name: Configure base OS
      ansible.builtin.include_role:
        name: base_os
      vars:
        # Guard with `project_def is defined` so the play still works for hosts
        # that were added without project metadata.
        base_os_backend_port: "{{ (project_def.backend_port | default(app_backend_port)) if project_def is defined else app_backend_port }}"
        base_os_frontend_port: "{{ (project_def.frontend_port | default(app_frontend_port)) if project_def is defined else app_frontend_port }}"
        base_os_enable_backend: "{{ project_def.components.backend | default(true) }}"
        base_os_enable_frontend: "{{ project_def.components.frontend | default(true) }}"
        base_os_user: "{{ project_def.os_user | default(appuser_name) }}"
        base_os_user_ssh_public_key: "{{ project_def.os_user_ssh_public_key | default(appuser_ssh_public_key | default('')) }}"
        # Only override when explicitly provided (avoids self-referential recursion)
        base_os_packages: "{{ project_def.base_os_packages if (project_def is defined and project_def.base_os_packages is defined) else omit }}"

    # POTE is a special-cased python/venv project; all other projects go
    # through the generic app_setup role below.
    - name: Configure POTE (python/venv + cron)
      ansible.builtin.include_role:
        name: pote
      vars:
        pote_git_repo: "{{ project_def.repo_url }}"
        pote_git_branch: "{{ env_def.branch }}"
        pote_user: "{{ project_def.os_user | default('poteapp') }}"
        pote_group: "{{ project_def.os_user | default('poteapp') }}"
        pote_app_dir: "{{ project_def.repo_dest | default('/home/' ~ (project_def.os_user | default('poteapp')) ~ '/pote') }}"
        pote_env: "{{ app_env }}"
        # Env-level settings win over project-level ones, then hard defaults.
        pote_db_host: "{{ env_def.pote_db_host | default(project_def.pote_db_host | default('localhost')) }}"
        pote_db_name: "{{ env_def.pote_db_name | default(project_def.pote_db_name | default('potedb')) }}"
        pote_db_user: "{{ env_def.pote_db_user | default(project_def.pote_db_user | default('poteuser')) }}"
        pote_smtp_host: "{{ env_def.pote_smtp_host | default(project_def.pote_smtp_host | default('mail.levkin.ca')) }}"
        pote_smtp_port: "{{ env_def.pote_smtp_port | default(project_def.pote_smtp_port | default(587)) }}"
        pote_smtp_user: "{{ env_def.pote_smtp_user | default(project_def.pote_smtp_user | default('')) }}"
        pote_from_email: "{{ env_def.pote_from_email | default(project_def.pote_from_email | default('')) }}"
        pote_report_recipients: "{{ env_def.pote_report_recipients | default(project_def.pote_report_recipients | default('')) }}"
      when: app_project == 'pote'

    - name: Configure app layout + deploy + systemd
      ansible.builtin.include_role:
        name: app_setup
      vars:
        app_repo_url: "{{ project_def.repo_url }}"
        app_repo_dest: "{{ project_def.repo_dest | default('/srv/app') }}"
        app_repo_branch: "{{ env_def.branch }}"
        # app_env is already set per-host via add_host (dev/qa/prod)
        app_owner: "{{ project_def.os_user | default(appuser_name) }}"
        app_group: "{{ project_def.os_user | default(appuser_name) }}"
        app_backend_port: "{{ project_def.backend_port | default(app_backend_port) }}"
        app_frontend_port: "{{ project_def.frontend_port | default(app_frontend_port) }}"
        app_enable_backend: "{{ project_def.components.backend | default(true) }}"
        app_enable_frontend: "{{ project_def.components.frontend | default(true) }}"
        app_backend_install_cmd: "{{ project_def.deploy.backend_install_cmd | default(app_backend_install_cmd) }}"
        app_backend_migrate_cmd: "{{ project_def.deploy.backend_migrate_cmd | default(app_backend_migrate_cmd) }}"
        app_backend_start_cmd: "{{ project_def.deploy.backend_start_cmd | default(app_backend_start_cmd) }}"
        app_frontend_install_cmd: "{{ project_def.deploy.frontend_install_cmd | default(app_frontend_install_cmd) }}"
        app_frontend_build_cmd: "{{ project_def.deploy.frontend_build_cmd | default(app_frontend_build_cmd) }}"
        app_frontend_start_cmd: "{{ project_def.deploy.frontend_start_cmd | default(app_frontend_start_cmd) }}"
        app_env_vars: "{{ env_def.env_vars | default({}) }}"
      when: app_project != 'pote'

View File

@ -0,0 +1,235 @@
---
# Helper tasks file for playbooks/app/provision_vms.yml
# Provisions a single (project, env) guest and adds it to dynamic inventory.
# Per-(project, env) facts. `env_item` is the dict2items entry supplied by the
# include_tasks loop in provision_one_guest.yml; `project_key`/`project_def`
# are set by the including file.
- name: Set environment facts
  ansible.builtin.set_fact:
    env_name: "{{ env_item.key }}"
    env_def: "{{ env_item.value }}"
    guest_name: "{{ env_item.value.name | default(project_key ~ '-' ~ env_item.key) }}"
    # vmid is optional; if omitted, we will manage idempotency by unique guest_name
    # NOTE(review): the `is not none` checks below rely on templating
    # preserving NoneType for `default(none)` — confirm on the Ansible
    # version in use (older releases stringified single-expression results).
    guest_vmid: "{{ env_item.value.vmid | default(none) }}"

# Accept either a comma-separated string (-e recreate_existing_envs=dev,qa)
# or a YAML list; downstream checks always see a list.
- name: Normalize recreate_existing_envs to a list
  ansible.builtin.set_fact:
    recreate_envs_list: >-
      {{
        (recreate_existing_envs.split(',') | map('trim') | list)
        if (recreate_existing_envs is defined and recreate_existing_envs is string)
        else (recreate_existing_envs | default([]))
      }}
# Idempotency lookups: prefer the VMID when the project pins one, otherwise
# fall back to a lookup by guest name.
- name: Check if Proxmox guest already exists (by VMID when provided)
  community.proxmox.proxmox_vm_info:
    api_host: "{{ proxmox_host }}"
    api_port: "{{ proxmox_api_port | default(8006) }}"
    validate_certs: "{{ proxmox_validate_certs | default(false) }}"
    api_user: "{{ proxmox_user }}"
    # Either password or token auth; unset credentials are omitted entirely.
    api_password: "{{ vault_proxmox_password | default(omit) }}"
    api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
    node: "{{ proxmox_node }}"
    type: lxc
    vmid: "{{ guest_vmid }}"
  register: proxmox_guest_info_vmid
  when: guest_vmid is not none

- name: Check if Proxmox guest already exists (by name when VMID omitted)
  community.proxmox.proxmox_vm_info:
    api_host: "{{ proxmox_host }}"
    api_port: "{{ proxmox_api_port | default(8006) }}"
    validate_certs: "{{ proxmox_validate_certs | default(false) }}"
    api_user: "{{ proxmox_user }}"
    api_password: "{{ vault_proxmox_password | default(omit) }}"
    api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
    node: "{{ proxmox_node }}"
    type: lxc
    name: "{{ guest_name }}"
  register: proxmox_guest_info_name
  when: guest_vmid is none

# True when either lookup above returned at least one guest.
- name: Set guest_exists fact
  ansible.builtin.set_fact:
    guest_exists: >-
      {{
        ((proxmox_guest_info_vmid.proxmox_vms | default([])) | length > 0)
        if (guest_vmid is not none)
        else ((proxmox_guest_info_name.proxmox_vms | default([])) | length > 0)
      }}

# A pinned VMID that belongs to a differently-named guest means the ranges in
# app_projects collide with an unrelated guest — refuse to proceed unless the
# operator explicitly overrides with -e allow_vmid_collision=true.
- name: "Guardrail: abort if VMID exists but name does not match (prevents overwriting other guests)"
  ansible.builtin.fail:
    msg: >-
      Refusing to use VMID {{ guest_vmid }} for {{ guest_name }} because it already exists as
      "{{ (proxmox_guest_info_vmid.proxmox_vms[0].name | default('UNKNOWN')) }}".
      Pick a different vmid range in app_projects or omit vmid to auto-allocate.
  when:
    - guest_vmid is not none
    - (proxmox_guest_info_vmid.proxmox_vms | default([])) | length > 0
    - (proxmox_guest_info_vmid.proxmox_vms[0].name | default('')) != guest_name
    - not (allow_vmid_collision | default(false) | bool)
# Destructive recreate path: only runs when the operator asked for it via
# recreate_existing_guests=true or by listing this env in recreate_existing_envs.
- name: Delete existing guest if requested (recreate)
  community.proxmox.proxmox:
    api_host: "{{ proxmox_host }}"
    api_port: "{{ proxmox_api_port | default(8006) }}"
    validate_certs: "{{ proxmox_validate_certs | default(false) }}"
    api_user: "{{ proxmox_user }}"
    api_password: "{{ vault_proxmox_password | default(omit) }}"
    api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
    node: "{{ proxmox_node }}"
    vmid: "{{ guest_vmid }}"
    # purge+force: also remove the container from jobs/HA and stop it if running.
    purge: true
    force: true
    state: absent
  when:
    - guest_exists | bool
    - guest_vmid is not none
    # `and` binds tighter than `or`; the recreate flag OR an env-list match triggers deletion.
    - recreate_existing_guests | default(false) | bool or (env_name in recreate_envs_list)

# Reset the fact so the provisioning steps below treat this env as fresh.
# NOTE(review): this task runs whenever recreate was requested, even if the
# guest never existed — harmless (guest_exists was already false), but confirm
# that is intended.
- name: Mark guest as not existing after delete
  ansible.builtin.set_fact:
    guest_exists: false
  when:
    - guest_vmid is not none
    - recreate_existing_guests | default(false) | bool or (env_name in recreate_envs_list)
# Fetch ALL LXC configs on the node (no vmid filter) so the next task can scan
# every net0 line for the target IP. Skipped when the guest already exists or
# when the operator disabled the guardrail.
- name: "Preflight: detect IP conflicts on Proxmox (existing LXC net0 ip=)"
  community.proxmox.proxmox_vm_info:
    api_host: "{{ proxmox_host }}"
    api_port: "{{ proxmox_api_port | default(8006) }}"
    validate_certs: "{{ proxmox_validate_certs | default(false) }}"
    api_user: "{{ proxmox_user }}"
    api_password: "{{ vault_proxmox_password | default(omit) }}"
    api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
    node: "{{ proxmox_node }}"
    type: lxc
    # Include each guest's current config so net0 (ip=...) is available.
    config: current
  register: proxmox_all_lxc
  when:
    - (env_def.ip | default('')) | length > 0
    - not (allow_ip_conflicts | default(false) | bool)
    - not (guest_exists | default(false) | bool)
# Parse every existing LXC net0 config and collect guests whose static IP
# equals this env's target IP. net0 may live under vm.config.net0, vm.netif
# (mapping or string), or vm.net0 depending on module/API version, so all
# shapes are probed defensively.
#
# FIX: when capture-group arguments are passed, ansible's regex_search returns
# a LIST of group values (or None on no match). The previous expression piped
# that list straight into regex_replace and compared the stringified list
# against target_ip, so a conflict could never match. The first group is now
# extracted explicitly before the comparison.
- name: Set proxmox_ip_conflicts fact
  ansible.builtin.set_fact:
    proxmox_ip_conflicts: >-
      {%- set conflicts = [] -%}
      {%- set target_ip = ((env_def.ip | string).split('/')[0]) -%}
      {%- for vm in (proxmox_all_lxc.proxmox_vms | default([])) -%}
      {%- set cfg_net0 = (
            vm['config']['net0']
            if (
              vm is mapping and ('config' in vm)
              and (vm['config'] is mapping) and ('net0' in vm['config'])
            )
            else none
          ) -%}
      {%- set vm_netif = (vm['netif'] if (vm is mapping and ('netif' in vm)) else none) -%}
      {%- set net0 = (
            cfg_net0
            if (cfg_net0 is not none)
            else (
              vm_netif['net0']
              if (vm_netif is mapping and ('net0' in vm_netif))
              else (
                vm_netif
                if (vm_netif is string)
                else (vm['net0'] if (vm is mapping and ('net0' in vm)) else '')
              )
            )
          ) | string -%}
      {#- regex_search with a group arg yields a list of groups, or None. -#}
      {%- set ip_groups = net0 | regex_search('(?:^|,)ip=([^,]+)', '\\1') -%}
      {%- set vm_ip = ((ip_groups | first | string) if ((ip_groups | default('', true)) | length > 0) else '') | regex_replace('/.*$', '') -%}
      {%- if (vm_ip | length) > 0 and vm_ip == target_ip -%}
      {%- set _ = conflicts.append({'vmid': (vm.vmid | default('') | string), 'name': (vm.name | default('') | string), 'net0': net0}) -%}
      {%- endif -%}
      {%- endfor -%}
      {{ conflicts }}
  when:
    - proxmox_all_lxc is defined
    - (env_def.ip | default('')) | length > 0
    - not (allow_ip_conflicts | default(false) | bool)
    - not (guest_exists | default(false) | bool)
# Hard stop when the Proxmox-side scan found the IP in another guest's net0.
- name: Abort if IP is already assigned to an existing Proxmox LXC
  ansible.builtin.fail:
    msg: >-
      Refusing to provision {{ guest_name }} because IP {{ (env_def.ip | string).split('/')[0] }}
      is already present in Proxmox LXC net0 config: {{ proxmox_ip_conflicts }}.
      Fix app_projects IPs or set -e allow_ip_conflicts=true.
  when:
    - (env_def.ip | default('')) | length > 0
    - not (allow_ip_conflicts | default(false) | bool)
    - not (guest_exists | default(false) | bool)
    - (proxmox_ip_conflicts | default([])) | length > 0

# Second, network-level guardrail: ping the target IP from the controller.
# failed_when: false — a non-zero rc (no reply) is the GOOD outcome here.
- name: "Preflight: fail if target IP responds (avoid accidental duplicate IP)"
  ansible.builtin.command: "ping -c 1 -W 1 {{ (env_def.ip | string).split('/')[0] }}"
  register: ip_ping
  changed_when: false
  failed_when: false
  when:
    - (env_def.ip | default('')) | length > 0
    - not (allow_ip_conflicts | default(false) | bool)
    - not (guest_exists | default(false) | bool)

- name: Abort if IP appears to be in use
  ansible.builtin.fail:
    msg: >-
      Refusing to provision {{ guest_name }} because IP {{ (env_def.ip | string).split('/')[0] }}
      responded to ping. Fix app_projects IPs or set -e allow_ip_conflicts=true.
      Note: this guardrail is ping-based; if your network blocks ICMP, an in-use IP may not respond.
  when:
    - (env_def.ip | default('')) | length > 0
    - not (allow_ip_conflicts | default(false) | bool)
    - not (guest_exists | default(false) | bool)
    # rc == 0 means the ping got a reply, i.e. something already owns the IP.
    - ip_ping.rc == 0
# Delegate actual guest creation to the proxmox_vm role. Precedence for every
# knob: per-project override -> global default on localhost -> role default.
- name: Provision LXC guest for project/env
  ansible.builtin.include_role:
    name: proxmox_vm
  vars:
    # NOTE: Use hostvars['localhost'] for defaults to avoid recursive self-references
    proxmox_guest_type: "{{ project_def.guest_defaults.guest_type | default(hostvars['localhost'].proxmox_guest_type | default('lxc')) }}"
    # Only pass vmid when provided; otherwise Proxmox will auto-allocate
    lxc_vmid: "{{ guest_vmid if guest_vmid is not none else omit }}"
    lxc_hostname: "{{ guest_name }}"
    lxc_ostemplate: "{{ project_def.lxc_ostemplate | default(hostvars['localhost'].lxc_ostemplate) }}"
    lxc_storage: "{{ project_def.lxc_storage | default(hostvars['localhost'].lxc_storage) }}"
    lxc_network_bridge: "{{ project_def.lxc_network_bridge | default(hostvars['localhost'].lxc_network_bridge) }}"
    lxc_unprivileged: "{{ project_def.lxc_unprivileged | default(hostvars['localhost'].lxc_unprivileged) }}"
    lxc_features_list: "{{ project_def.lxc_features_list | default(hostvars['localhost'].lxc_features_list) }}"
    lxc_cores: "{{ project_def.guest_defaults.cores | default(hostvars['localhost'].lxc_cores) }}"
    lxc_memory_mb: "{{ project_def.guest_defaults.memory_mb | default(hostvars['localhost'].lxc_memory_mb) }}"
    lxc_swap_mb: "{{ project_def.guest_defaults.swap_mb | default(hostvars['localhost'].lxc_swap_mb) }}"
    lxc_rootfs_size_gb: "{{ project_def.guest_defaults.rootfs_size_gb | default(hostvars['localhost'].lxc_rootfs_size_gb) }}"
    lxc_ip: "{{ env_def.ip }}"
    lxc_gateway: "{{ env_def.gateway }}"
    lxc_nameserver: "{{ project_def.lxc_nameserver | default(hostvars['localhost'].lxc_nameserver) }}"
    lxc_pubkey: "{{ appuser_ssh_public_key | default('') }}"
    lxc_start_after_create: "{{ project_def.lxc_start_after_create | default(hostvars['localhost'].lxc_start_after_create) }}"

# Block until sshd inside the new guest answers (up to 5 minutes), so the
# configure plays that follow don't race the container boot.
- name: Wait for SSH to become available
  ansible.builtin.wait_for:
    host: "{{ (env_def.ip | string).split('/')[0] }}"
    port: 22
    timeout: 300
  when: (env_def.ip | default('')) | length > 0

# Register the new guest in the in-memory inventory using the same group
# naming scheme as configure_app.yml play 1.
- name: Add guest to dynamic inventory
  ansible.builtin.add_host:
    name: "{{ guest_name }}"
    groups:
      - "app_all"
      - "app_{{ project_key }}_all"
      - "app_{{ project_key }}_{{ env_name }}"
    ansible_host: "{{ (env_def.ip | string).split('/')[0] }}"
    ansible_user: root
    app_project: "{{ project_key }}"
    app_env: "{{ env_name }}"

View File

@ -0,0 +1,21 @@
---
# Helper tasks file for playbooks/app/provision_vms.yml
# Provisions all envs for a single project and adds dynamic inventory hosts.
# Entry point per project (loop_var `project_key` from provision_vms.yml).
# NOTE(review): despite the file name implying a single guest, this file
# provisions ALL envs of one project — confirm/rename for clarity.
- name: Set project definition
  ansible.builtin.set_fact:
    project_def: "{{ app_projects[project_key] }}"

# Catch copy-paste mistakes early: every env of a project must use a distinct
# IP. CIDR suffixes are stripped before comparison; envs without an IP are
# ignored.
- name: "Preflight: validate env IPs are unique within project"
  ansible.builtin.assert:
    that:
      - (project_env_ips | length) == ((project_env_ips | unique) | length)
    fail_msg: "Duplicate IPs detected in app_projects.{{ project_key }}.envs (IPs must be unique): {{ project_env_ips }}"
  vars:
    project_env_ips: "{{ project_def.envs | dict2items | map(attribute='value.ip') | select('defined') | map('string') | map('regex_replace', '/.*$', '') | reject('equalto', '') | list }}"
  when:
    - project_def.envs is defined
    - (project_def.envs | length) > 0

# One include per env (dev/qa/prod); each iteration sets env_item.
- name: Provision each environment for project
  ansible.builtin.include_tasks: provision_one_env.yml
  loop: "{{ project_def.envs | dict2items }}"
  loop_control:
    loop_var: env_item

View File

@ -0,0 +1,36 @@
---
# Playbook: app/provision_vms.yml
# Purpose: Provision Proxmox guests for app projects (LXC-first) based on `app_projects`.
# Targets: localhost (Proxmox API)
# Tags: app, provision
#
# Usage:
# - Run one project: ansible-playbook -i inventories/production playbooks/app/provision_vms.yml -e app_project=projectA
# - Run all projects: ansible-playbook -i inventories/production playbooks/app/provision_vms.yml
# Controller-side play: everything goes through the Proxmox API, so no remote
# hosts or facts are needed.
- name: Provision Proxmox guests for app projects
  hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # All project keys when app_project is unset/empty; otherwise just that one.
    selected_projects: >-
      {{
        (app_projects | dict2items | map(attribute='key') | list)
        if (app_project is not defined or app_project | length == 0)
        else [app_project]
      }}
  tasks:
    - name: Validate requested project exists
      ansible.builtin.assert:
        that:
          # Short-circuits when app_project is unset, so fail_msg's
          # {{ app_project }} only renders when the var is defined.
          - app_project is not defined or app_project in app_projects
        fail_msg: "Requested app_project={{ app_project }} does not exist in app_projects."

    # Per-project helper: validates IP uniqueness, then provisions every env.
    - name: Provision each project/env guest via Proxmox API
      ansible.builtin.include_tasks: provision_one_guest.yml
      loop: "{{ selected_projects }}"
      loop_control:
        loop_var: project_key

View File

@ -0,0 +1,99 @@
---
# Playbook: app/proxmox_info.yml
# Purpose: Query Proxmox API for VM/LXC info (status, node, name, vmid) and
#          optionally filter to just the guests defined in `app_projects`.
# Targets: localhost
# Tags: app, proxmox, info
#
# Usage examples:
#   - Show only projectA guests:        ansible-playbook -i inventories/production playbooks/app/proxmox_info.yml -e app_project=projectA
#   - Show all VMs/CTs on the cluster:  ansible-playbook -i inventories/production playbooks/app/proxmox_info.yml -e proxmox_info_all=true
#   - Restrict to only LXC:             -e proxmox_info_type=lxc
- name: Proxmox inventory info (VMs and containers)
  hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # All project keys when app_project is unset/empty, otherwise just the requested one.
    selected_projects: >-
      {{
        (app_projects | dict2items | map(attribute='key') | list)
        if (app_project is not defined or app_project | length == 0)
        else [app_project]
      }}
    proxmox_info_all_default: false
    proxmox_info_type_default: all # all|lxc|qemu
  tasks:
    - name: Validate requested project exists
      ansible.builtin.assert:
        that:
          - app_project is not defined or app_project in app_projects
        fail_msg: "Requested app_project={{ app_project | default('') }} does not exist in app_projects."
    - name: Build list of expected VMIDs and names from app_projects
      ansible.builtin.set_fact:
        expected_vmids: >-
          {{
            selected_projects
            | map('extract', app_projects)
            | map(attribute='envs')
            | map('dict2items')
            | map('map', attribute='value')
            | list
            | flatten
            | map(attribute='vmid')
            | select('defined')
            | list
          }}
        expected_names: >-
          {{
            selected_projects
            | map('extract', app_projects)
            | map(attribute='envs')
            | map('dict2items')
            | map('map', attribute='value')
            | list
            | flatten
            | map(attribute='name')
            | list
          }}
    - name: Query Proxmox for guest info
      community.proxmox.proxmox_vm_info:
        api_host: "{{ proxmox_host }}"
        api_port: "{{ proxmox_api_port | default(8006) }}"
        validate_certs: "{{ proxmox_validate_certs | default(false) }}"
        api_user: "{{ proxmox_user }}"
        api_password: "{{ vault_proxmox_password | default(omit) }}"
        api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
        api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
        node: "{{ proxmox_node | default(omit) }}"
        type: "{{ proxmox_info_type | default(proxmox_info_type_default) }}"
        config: none
      register: proxmox_info
    # FIX: match by expected name OR expected VMID. Previously expected_vmids
    # was computed but never used, so a renamed guest silently disappeared
    # from the summary even though its VMID was reserved by app_projects.
    - name: Filter guests to expected VMIDs/names (unless proxmox_info_all)
      ansible.builtin.set_fact:
        filtered_guests: >-
          {{
            (proxmox_info.proxmox_vms | default([]))
            if (proxmox_info_all | default(proxmox_info_all_default) | bool)
            else (
              ((proxmox_info.proxmox_vms | default([]))
               | selectattr('name', 'in', expected_names)
               | list)
              | union(
                  (proxmox_info.proxmox_vms | default([]))
                  | selectattr('vmid', 'defined')
                  | selectattr('vmid', 'in', expected_vmids | map('int') | list)
                  | list
                )
            )
          }}
    - name: Display Proxmox guest summary
      ansible.builtin.debug:
        msg: |
          Proxmox: {{ proxmox_host }} (node={{ proxmox_node | default('any') }}, type={{ proxmox_info_type | default(proxmox_info_type_default) }})
          Showing: {{ 'ALL guests' if (proxmox_info_all | default(proxmox_info_all_default) | bool) else ('app_projects for ' ~ (selected_projects | join(', '))) }}
          {% for g in (filtered_guests | sort(attribute='vmid')) %}
          - vmid={{ g.vmid }} type={{ g.id.split('/')[0] if g.id is defined else 'unknown' }} name={{ g.name | default('') }} node={{ g.node | default('') }} status={{ g.status | default('') }}
          {% endfor %}

15
playbooks/app/site.yml Normal file
View File

@ -0,0 +1,15 @@
---
# Playbook: app/site.yml
# Purpose: End-to-end provisioning + configuration for app projects.
# Targets: localhost (provision) + dynamic inventory groups (configure)
# Tags: app
- name: Provision Proxmox guests
import_playbook: provision_vms.yml
tags: ['app', 'provision']
- name: Configure guests
import_playbook: configure_app.yml
tags: ['app', 'configure']

View File

@ -0,0 +1,51 @@
---
# Playbook: app/ssh_client_config.yml
# Purpose: Ensure ~/.ssh/config has convenient host aliases for project envs.
# Targets: localhost
# Tags: app, ssh-config
#
# Opt-in: pass -e manage_ssh_config=true (disabled by default).
#
# Example:
#   ssh projectA-dev
#   ssh projectA-qa
#   ssh projectA-prod
- name: Configure SSH client aliases for app projects
  hosts: localhost
  connection: local
  gather_facts: false
  vars:
    ssh_config_path: "{{ lookup('ansible.builtin.env', 'HOME') + '/.ssh/config' }}"
    # All project keys when app_project is unset/empty, otherwise just the requested one.
    selected_projects: >-
      {{
        (app_projects | dict2items | map(attribute='key') | list)
        if (app_project is not defined or app_project | length == 0)
        else [app_project]
      }}
  tasks:
    # BUGFIX: do NOT re-declare manage_ssh_config in play vars as
    # "{{ manage_ssh_config | default(false) }}" — a variable defined in terms
    # of itself triggers Ansible's "recursive loop detected in template"
    # fatal error whenever the extra-var is absent. Default at point of use.
    - name: Skip if SSH config management disabled
      ansible.builtin.meta: end_play
      when: not (manage_ssh_config | default(false) | bool)
    - name: Ensure ~/.ssh directory exists
      ansible.builtin.file:
        path: "{{ lookup('ansible.builtin.env', 'HOME') + '/.ssh' }}"
        state: directory
        mode: "0700"
    - name: Add SSH config entries for each project/env
      community.general.ssh_config:
        user_ssh_config_file: "{{ ssh_config_path }}"
        host: "{{ app_projects[item.0].envs[item.1].name | default(item.0 ~ '-' ~ item.1) }}"
        # Strip CIDR suffix if the inventory stores "10.0.10.101/24".
        hostname: "{{ (app_projects[item.0].envs[item.1].ip | string).split('/')[0] }}"
        user: "{{ appuser_name | default('appuser') }}"
        identity_file: "{{ ssh_identity_file | default(omit) }}"
        state: present
      # NOTE(review): env names are hard-coded here; extend the list if
      # projects ever define environments beyond dev/qa/prod.
      loop: "{{ selected_projects | product(['dev', 'qa', 'prod']) | list }}"
      when:
        - app_projects[item.0] is defined
        - app_projects[item.0].envs[item.1] is defined
        - (app_projects[item.0].envs[item.1].ip | default('')) | length > 0

View File

@ -13,3 +13,7 @@
- name: Tailscale VPN deployment
import_playbook: tailscale.yml
tags: ['tailscale']
- name: App projects on Proxmox (LXC-first)
import_playbook: app/site.yml
tags: ['app']

9
provision_vms.yml Normal file
View File

@ -0,0 +1,9 @@
---
# Wrapper playbook
# Delegates to playbooks/app/provision_vms.yml so the command can be run
# from the repository root.
# Purpose:
# ansible-playbook -i inventories/production provision_vms.yml -e app_project=projectA
- name: Provision app project guests
  import_playbook: playbooks/app/provision_vms.yml

24
roles/app_setup/README.md Normal file
View File

@ -0,0 +1,24 @@
# `app_setup`
Creates the standard app filesystem layout and runtime services:
- `/srv/app/backend` and `/srv/app/frontend`
- `/srv/app/.env.<dev|qa|prod>`
- `/usr/local/bin/deploy_app.sh` (git pull, install deps, build, migrate, restart services)
- systemd units:
- `app-backend.service`
- `app-frontend.service`
All behavior is driven by variables so you can reuse this role for multiple projects.
## Variables
See [`defaults/main.yml`](defaults/main.yml). Common inputs in the app stack:
- `app_project`, `app_env` (used for naming and `.env.<env>` selection)
- `app_repo_url`, `app_repo_dest`, `app_repo_branch`
- `app_env_vars` (map written into `/srv/app/.env.<env>`)
- `app_enable_backend`, `app_enable_frontend` (enable/disable backend/frontend setup; see `defaults/main.yml`)
- `app_backend_dir`, `app_frontend_dir`, ports and Node.js commands

View File

@ -0,0 +1,40 @@
---
# Role: app_setup
# Purpose: app filesystem layout, env files, deploy script, and systemd units.

# Base layout under /srv/app.
app_root: "/srv/app"
app_backend_dir: "{{ app_root }}/backend"
app_frontend_dir: "{{ app_root }}/frontend"

# Which environment file to render for this host: dev|qa|prod
app_env: dev

# Components (useful for single-repo projects)
app_enable_backend: true
app_enable_frontend: true

# Repo settings (project-driven)
app_repo_url: ""
app_repo_dest: "{{ app_root }}"
app_repo_branch: "main"

# Owner for app files
app_owner: "{{ appuser_name | default('appuser') }}"
app_group: "{{ appuser_name | default('appuser') }}"

# Ports
app_backend_port: 3001
app_frontend_port: 3000

# Commands (Node defaults; override per project as needed)
app_backend_install_cmd: "npm ci"
app_backend_migrate_cmd: "npm run migrate"
app_backend_start_cmd: "npm start"
app_frontend_install_cmd: "npm ci"
app_frontend_build_cmd: "npm run build"
app_frontend_start_cmd: "npm start"

# Arbitrary environment variables for the env file
app_env_vars: {}

View File

@ -0,0 +1,8 @@
---
# Role: app_setup handlers
# Re-read unit files after a *.service template changes (notified by tasks
# that deploy the systemd units).
- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: true

View File

@ -0,0 +1,84 @@
---
# Role: app_setup
# Purpose: create app layout, env file, deploy script, and systemd units.

# Root plus optional backend/frontend subdirectories in one data-driven pass.
- name: Ensure application directories exist
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: directory
    owner: "{{ app_owner }}"
    group: "{{ app_group }}"
    mode: "0755"
  when: item.wanted | bool
  loop:
    - { path: "{{ app_root }}", wanted: true }
    - { path: "{{ app_backend_dir }}", wanted: "{{ app_enable_backend }}" }
    - { path: "{{ app_frontend_dir }}", wanted: "{{ app_enable_frontend }}" }

- name: Deploy environment file for this env
  ansible.builtin.template:
    src: env.j2
    dest: "{{ app_root }}/.env.{{ app_env }}"
    owner: "{{ app_owner }}"
    group: "{{ app_group }}"
    mode: "0640"

- name: Deploy deploy script
  ansible.builtin.template:
    src: deploy_app.sh.j2
    dest: /usr/local/bin/deploy_app.sh
    owner: root
    group: root
    mode: "0755"

# One templated task covers both units; each is gated by its component flag.
- name: Deploy systemd units
  ansible.builtin.template:
    src: "{{ item.unit }}.j2"
    dest: "/etc/systemd/system/{{ item.unit }}"
    owner: root
    group: root
    mode: "0644"
  notify: Reload systemd
  when: item.wanted | bool
  loop:
    - { unit: "app-backend.service", wanted: "{{ app_enable_backend }}" }
    - { unit: "app-frontend.service", wanted: "{{ app_enable_frontend }}" }

# daemon-reload must land before the enable/start below sees the new units.
- name: Ensure systemd is reloaded before enabling services
  ansible.builtin.meta: flush_handlers

- name: Enable and start app services
  ansible.builtin.systemd:
    name: "{{ item.unit }}"
    enabled: true
    state: started
  when: item.wanted | bool
  loop:
    - { unit: "app-backend.service", wanted: "{{ app_enable_backend }}" }
    - { unit: "app-frontend.service", wanted: "{{ app_enable_frontend }}" }

View File

@ -0,0 +1,19 @@
{# Ansible-managed systemd unit for the app backend; rendered per host by app_setup. #}
[Unit]
Description=App Backend ({{ app_env }})
# Wait for full network availability so the service does not race DNS/DHCP at boot.
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User={{ app_owner }}
Group={{ app_group }}
WorkingDirectory={{ app_backend_dir }}
# Per-env dotenv rendered by the app_setup role (env.j2).
EnvironmentFile={{ app_root }}/.env.{{ app_env }}
# bash -lc so the configured start command resolves PATH like a login shell.
ExecStart=/usr/bin/env bash -lc '{{ app_backend_start_cmd }}'
Restart=on-failure
RestartSec=3

[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,19 @@
{# Ansible-managed systemd unit for the app frontend; rendered per host by app_setup. #}
[Unit]
Description=App Frontend ({{ app_env }})
# Wait for full network availability so the service does not race DNS/DHCP at boot.
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User={{ app_owner }}
Group={{ app_group }}
WorkingDirectory={{ app_frontend_dir }}
# Per-env dotenv rendered by the app_setup role (env.j2).
EnvironmentFile={{ app_root }}/.env.{{ app_env }}
# bash -lc so the configured start command resolves PATH like a login shell.
ExecStart=/usr/bin/env bash -lc '{{ app_frontend_start_cmd }}'
Restart=on-failure
RestartSec=3

[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Ansible-managed deploy script
set -euo pipefail
REPO_URL="{{ app_repo_url }}"
BRANCH="{{ app_repo_branch }}"
APP_ROOT="{{ app_repo_dest }}"
BACKEND_DIR="{{ app_backend_dir }}"
FRONTEND_DIR="{{ app_frontend_dir }}"
ENV_FILE="{{ app_root }}/.env.{{ app_env }}"
echo "[deploy] repo=${REPO_URL} branch=${BRANCH} root=${APP_ROOT}"
if [[ ! -d "${APP_ROOT}/.git" ]]; then
echo "[deploy] cloning repo"
install -d -m 0755 "${APP_ROOT}"
git clone --branch "${BRANCH}" --single-branch "${REPO_URL}" "${APP_ROOT}"
fi
echo "[deploy] syncing branch"
git -C "${APP_ROOT}" fetch origin --prune
if ! git -C "${APP_ROOT}" rev-parse --verify --quiet "refs/remotes/origin/${BRANCH}" >/dev/null; then
echo "[deploy] ERROR: branch '${BRANCH}' not found on origin"
exit 2
fi
git -C "${APP_ROOT}" checkout -B "${BRANCH}" "origin/${BRANCH}"
git -C "${APP_ROOT}" pull --ff-only origin "${BRANCH}"
if [[ "{{ app_enable_backend | bool }}" == "True" ]]; then
echo "[deploy] backend install"
cd "${BACKEND_DIR}"
{{ app_backend_install_cmd }}
echo "[deploy] backend migrations"
{{ app_backend_migrate_cmd }}
fi
if [[ "{{ app_enable_frontend | bool }}" == "True" ]]; then
echo "[deploy] frontend install"
cd "${FRONTEND_DIR}"
{{ app_frontend_install_cmd }}
echo "[deploy] frontend build"
{{ app_frontend_build_cmd }}
fi
echo "[deploy] restarting services"
{% if app_enable_backend | bool %}
systemctl restart app-backend.service
{% endif %}
{% if app_enable_frontend | bool %}
systemctl restart app-frontend.service
{% endif %}
echo "[deploy] done"

View File

@ -0,0 +1,13 @@
{# Jinja template for the per-env dotenv consumed by the systemd units and deploy script. #}
# Ansible-managed environment file for {{ app_env }}
# Loaded by systemd units and deploy script.

# Common
APP_ENV={{ app_env }}
BACKEND_PORT={{ app_backend_port }}
FRONTEND_PORT={{ app_frontend_port }}

# Project-specific extras from app_env_vars (empty map by default)
{% for k, v in (app_env_vars | default({})).items() %}
{{ k }}={{ v }}
{% endfor %}

21
roles/base_os/README.md Normal file
View File

@ -0,0 +1,21 @@
# `base_os`
Baseline OS configuration for app guests:
- Installs required packages (git/curl/nodejs/npm/ufw/openssh-server/etc.)
- Creates deployment user (default `appuser`) with passwordless sudo
- Adds your authorized SSH key
- Configures UFW to allow SSH + backend/frontend ports
## Variables
See [`defaults/main.yml`](defaults/main.yml). Common inputs in the app stack:
- `appuser_name`, `appuser_groups`, `appuser_shell`
- `appuser_ssh_public_key` (usually `{{ vault_ssh_public_key }}`)
- `base_os_enable_backend`, `base_os_enable_frontend` (enable/disable firewall rules per component; see `defaults/main.yml`)
- `app_backend_port`, `app_frontend_port`
This role is used by `playbooks/app/configure_app.yml` after provisioning.

View File

@ -0,0 +1,32 @@
---
# Role: base_os
# Purpose: baseline OS configuration for app guests (packages, appuser, firewall).

# Baseline package set (Debian/Ubuntu names; installed via apt).
base_os_packages:
  - git
  - curl
  - ca-certificates
  - openssh-server
  - sudo
  - ufw
  - python3
  - python3-apt
  - nodejs
  - npm

base_os_allow_ssh_port: 22

# App ports (override per project)
base_os_backend_port: "{{ app_backend_port | default(3001) }}"
base_os_frontend_port: "{{ app_frontend_port | default(3000) }}"
base_os_enable_backend: true
base_os_enable_frontend: true

# Deployment user; defaults track the stack-wide appuser_* variables.
base_os_user: "{{ appuser_name | default('appuser') }}"
base_os_user_shell: "{{ appuser_shell | default('/bin/bash') }}"
base_os_user_groups: "{{ appuser_groups | default(['sudo']) }}"
base_os_user_ssh_public_key: "{{ appuser_ssh_public_key | default('') }}"

# If true, create passwordless sudo for base_os_user.
base_os_passwordless_sudo: true

View File

@ -0,0 +1,8 @@
---
# Role: base_os handlers
# Re-apply firewall rules after a rule change.
# changed_when: false — the reload itself is a refresh, not a reported change.
- name: Reload ufw
  ansible.builtin.command: ufw reload
  changed_when: false

View File

@ -0,0 +1,65 @@
---
# Role: base_os
# Purpose: baseline OS config for app guests.
- name: Ensure apt cache is up to date
ansible.builtin.apt:
update_cache: true
cache_valid_time: 3600
- name: Install baseline packages
ansible.builtin.apt:
name: "{{ base_os_packages }}"
state: present
- name: Ensure app user exists
ansible.builtin.user:
name: "{{ base_os_user }}"
shell: "{{ base_os_user_shell }}"
groups: "{{ base_os_user_groups }}"
append: true
create_home: true
state: present
- name: Ensure app user has authorized SSH key
ansible.posix.authorized_key:
user: "{{ base_os_user }}"
state: present
key: "{{ base_os_user_ssh_public_key }}"
when: base_os_user_ssh_public_key | length > 0
- name: Configure passwordless sudo for app user
ansible.builtin.copy:
dest: "/etc/sudoers.d/{{ base_os_user }}"
content: "{{ base_os_user }} ALL=(ALL) NOPASSWD:ALL\n"
owner: root
group: root
mode: "0440"
when: base_os_passwordless_sudo | bool
- name: Ensure UFW allows SSH
ansible.builtin.ufw:
rule: allow
port: "{{ base_os_allow_ssh_port }}"
proto: tcp
- name: Ensure UFW allows backend port
ansible.builtin.ufw:
rule: allow
port: "{{ base_os_backend_port }}"
proto: tcp
when: base_os_enable_backend | bool
- name: Ensure UFW allows frontend port
ansible.builtin.ufw:
rule: allow
port: "{{ base_os_frontend_port }}"
proto: tcp
when: base_os_enable_frontend | bool
- name: Enable UFW (deny incoming by default)
ansible.builtin.ufw:
state: enabled
policy: deny

27
roles/pote/README.md Normal file
View File

@ -0,0 +1,27 @@
# `pote`
Deploys the **POTE** project as a Python/venv application (no HTTP services required) and schedules cron jobs.
## What it does
- Installs required system packages (git, python3.11/venv, build deps, postgresql server/client)
- Ensures a dedicated OS user exists (default: `poteapp`)
- Creates PostgreSQL database and user
- Clones/updates the repo from an SSH remote using a vault-backed private key
- Creates a Python virtualenv and installs from `pyproject.toml` (editable mode)
- Renders an environment file (default: `{{ pote_app_dir }}/.env`)
- Runs Alembic database migrations
- Installs cron jobs (daily/weekly/health-check)
## Key variables
See `defaults/main.yml`. Common inputs:
- `pote_git_repo`, `pote_git_branch`
- `pote_git_ssh_key` (set `vault_pote_git_ssh_key` in your vault)
- `pote_user`, `pote_app_dir`, `pote_venv_dir`
- `pote_db_*`, `pote_smtp_*`
- `pote_enable_cron`, `pote_*_time`, `pote_*_job`

View File

@ -0,0 +1,116 @@
---
# Role: pote
# Purpose: Deploy POTE (Python/venv + cron) from a Git repo via SSH.

# -----------------------------------------------------------------------------
# Git / source
# -----------------------------------------------------------------------------
pote_git_repo: ""
pote_git_branch: "main"
# SSH private key used to clone/pull (vault-backed). Keep this secret.
# Prefer setting `vault_pote_git_ssh_key` in your vault; `vault_git_ssh_key` is supported for compatibility.
pote_git_ssh_key: "{{ vault_pote_git_ssh_key | default(vault_git_ssh_key | default('')) }}"
# Host/IP for known_hosts (so first clone is non-interactive).
pote_git_host: "10.0.30.169"
pote_git_port: 22

# -----------------------------------------------------------------------------
# User / paths
# -----------------------------------------------------------------------------
pote_user: "poteapp"
pote_group: "{{ pote_user }}"
pote_app_dir: "/home/{{ pote_user }}/pote"
pote_venv_dir: "{{ pote_app_dir }}/venv"
# Interpreter used to create the venv; must be provided by pote_system_packages.
pote_python_bin: "python3.11"

# Environment file
pote_env_file: "{{ pote_app_dir }}/.env"
pote_env_file_mode: "0600"

# Logs
pote_logs_dir: "/home/{{ pote_user }}/logs"
pote_log_level: "INFO"
pote_log_file: "{{ pote_logs_dir }}/pote.log"

# Monitoring / alerting (optional)
pote_market_tickers: ""
pote_alert_min_severity: ""

# Optional API keys
pote_quiverquant_api_key: ""
pote_fmp_api_key: ""

# -----------------------------------------------------------------------------
# System deps
# -----------------------------------------------------------------------------
pote_system_packages:
  - git
  - ca-certificates
  - python3.11
  - python3.11-venv
  - python3.11-dev
  - python3-pip
  - build-essential
  - postgresql
  - postgresql-contrib
  - postgresql-client
  - libpq-dev

# -----------------------------------------------------------------------------
# Database
# -----------------------------------------------------------------------------
pote_db_host: "localhost"
pote_db_port: 5432
pote_db_name: "potedb"
pote_db_user: "poteuser"
# Prefer env-specific vault vars; fall back to a generic one if present.
# Resolution order: vault_pote_db_password first, else the per-env
# vault_pote_db_password_<env> / vault_db_password_<env> pair, else ''.
pote_db_password: >-
  {{
    vault_pote_db_password
    | default(
        (vault_pote_db_password_dev | default(vault_db_password_dev | default(''), true)) if pote_env == 'dev'
        else (vault_pote_db_password_qa | default(vault_db_password_qa | default(''), true)) if pote_env == 'qa'
        else (vault_pote_db_password_prod | default(vault_db_password_prod | default(''), true)) if pote_env == 'prod'
        else '',
        true
      )
  }}
# Convenience computed URL (commonly used by Python apps)
pote_database_url: "postgresql://{{ pote_db_user }}:{{ pote_db_password }}@{{ pote_db_host }}:{{ pote_db_port }}/{{ pote_db_name }}"

# -----------------------------------------------------------------------------
# SMTP / email
# -----------------------------------------------------------------------------
pote_smtp_host: "mail.levkin.ca"
pote_smtp_port: 587
pote_smtp_user: ""
pote_smtp_password: "{{ vault_pote_smtp_password | default(vault_smtp_password | default('')) }}"
pote_from_email: ""
pote_report_recipients: ""

# -----------------------------------------------------------------------------
# Automation / cron
# -----------------------------------------------------------------------------
pote_enable_cron: true
# "minute hour" (e.g. "0 6")
pote_daily_report_time: "0 6"
# "minute hour dow" (e.g. "0 8 0" => Sunday 08:00)
pote_weekly_report_time: "0 8 0"
# "minute hour" for */6 style (e.g. "0 */6")
pote_health_check_time: "0 */6"
pote_daily_report_enabled: true
pote_weekly_report_enabled: true
pote_health_check_enabled: true
# Commands (adjust to your repos actual scripts)
pote_daily_job: "{{ pote_app_dir }}/scripts/automated_daily_run.sh >> {{ pote_logs_dir }}/daily_run.log 2>&1"
pote_weekly_job: "{{ pote_app_dir }}/scripts/automated_weekly_run.sh >> {{ pote_logs_dir }}/weekly_run.log 2>&1"
pote_health_check_job: "{{ pote_venv_dir }}/bin/python {{ pote_app_dir }}/scripts/health_check.py >> {{ pote_logs_dir }}/health_check.log 2>&1"
# Environment name for templating/logging (dev|qa|prod)
pote_env: "{{ app_env | default('prod') }}"

227
roles/pote/tasks/main.yml Normal file
View File

@ -0,0 +1,227 @@
---
# Role: pote
# Purpose: Deploy POTE (python/venv) and schedule cron jobs.

- name: Ensure POTE system dependencies are installed
  ansible.builtin.apt:
    name: "{{ pote_system_packages }}"
    state: present
    update_cache: true
    cache_valid_time: 3600

- name: Ensure POTE group exists
  ansible.builtin.group:
    name: "{{ pote_group }}"
    state: present

- name: Ensure POTE user exists
  ansible.builtin.user:
    name: "{{ pote_user }}"
    group: "{{ pote_group }}"
    shell: /bin/bash
    create_home: true
    state: present

- name: Ensure POTE app directory exists
  ansible.builtin.file:
    path: "{{ pote_app_dir }}"
    state: directory
    owner: "{{ pote_user }}"
    group: "{{ pote_group }}"
    mode: "0755"

- name: Ensure SSH directory exists for POTE user
  ansible.builtin.file:
    path: "/home/{{ pote_user }}/.ssh"
    state: directory
    owner: "{{ pote_user }}"
    group: "{{ pote_group }}"
    mode: "0700"

# no_log keeps the private key material out of task output/CI logs.
- name: Install Git SSH key for POTE (vault-backed)
  ansible.builtin.copy:
    dest: "/home/{{ pote_user }}/.ssh/id_ed25519"
    content: "{{ pote_git_ssh_key }}"
    owner: "{{ pote_user }}"
    group: "{{ pote_group }}"
    mode: "0600"
  no_log: true
  when: (pote_git_ssh_key | default('')) | length > 0

# failed_when: false — an unreachable Git host is tolerated here; the
# empty-stdout guard on the known_hosts task below simply skips instead.
- name: Fetch Git host key (ssh-keyscan)
  ansible.builtin.command: "ssh-keyscan -p {{ pote_git_port }} -H {{ pote_git_host }}"
  register: pote_ssh_keyscan
  changed_when: false
  failed_when: false
  when: (pote_git_host | default('')) | length > 0

# NOTE(review): `ssh-keyscan -H` emits hashed hostnames and usually several
# key lines; confirm ansible.builtin.known_hosts matches `name` against this
# hashed, multi-line `key` value as intended.
- name: Ensure Git host is in known_hosts for POTE user
  ansible.builtin.known_hosts:
    path: "/home/{{ pote_user }}/.ssh/known_hosts"
    name: "{{ pote_git_host }}"
    key: "{{ pote_ssh_keyscan.stdout }}"
    state: present
  when:
    - (pote_git_host | default('')) | length > 0
    - (pote_ssh_keyscan.stdout | default('')) | length > 0
# block/rescue converts a raw Git failure into an actionable error message.
- name: Clone/update POTE repository
  block:
    - name: Clone/update POTE repository (git over SSH)
      ansible.builtin.git:
        repo: "{{ pote_git_repo }}"
        dest: "{{ pote_app_dir }}"
        version: "{{ pote_git_branch }}"
        key_file: "/home/{{ pote_user }}/.ssh/id_ed25519"
        accept_hostkey: true
        update: true
      become: true
      become_user: "{{ pote_user }}"
      register: pote_git_result
  rescue:
    - name: Abort with actionable Git SSH guidance
      ansible.builtin.fail:
        msg: >-
          Failed to clone {{ pote_git_repo }} (branch={{ pote_git_branch }}) as user {{ pote_user }}.
          Common causes:
          - vault_pote_git_ssh_key is not a valid OpenSSH private key (or is passphrase-protected)
          - the public key is not added to Gitea as a deploy key / user key with access to ilia/POTE
          - repo or branch name is wrong
          Error: {{ pote_git_result.msg | default(pote_git_result.stderr | default('unknown error')) }}
- name: Ensure PostgreSQL is running
  ansible.builtin.systemd:
    name: postgresql
    state: started
    enabled: true

# `-tAc` prints a bare "1" when the role exists, nothing otherwise.
- name: Check if PostgreSQL role exists
  ansible.builtin.command: "psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{{ pote_db_user }}'\""
  become: true
  become_user: postgres
  register: pote_pg_role_check
  changed_when: false

# SECURITY FIX: no_log — the rendered command embeds the DB password, which
# would otherwise be printed in task output, -vvv logs, and CI job logs.
- name: Create PostgreSQL user for POTE
  ansible.builtin.command: "psql -c \"CREATE USER {{ pote_db_user }} WITH PASSWORD '{{ pote_db_password }}'\""
  become: true
  become_user: postgres
  no_log: true
  when: (pote_pg_role_check.stdout | trim) != '1'
  changed_when: true

# SECURITY FIX: no_log — same reasoning as above.
- name: Ensure PostgreSQL user password is set (idempotent)
  ansible.builtin.command: "psql -c \"ALTER USER {{ pote_db_user }} WITH PASSWORD '{{ pote_db_password }}'\""
  become: true
  become_user: postgres
  no_log: true
  when: (pote_db_password | default('')) | length > 0
  changed_when: false

- name: Check if PostgreSQL database exists
  ansible.builtin.command: "psql -tAc \"SELECT 1 FROM pg_database WHERE datname='{{ pote_db_name }}'\""
  become: true
  become_user: postgres
  register: pote_pg_db_check
  changed_when: false

- name: Create PostgreSQL database for POTE
  ansible.builtin.command: "psql -c \"CREATE DATABASE {{ pote_db_name }} OWNER {{ pote_db_user }}\""
  become: true
  become_user: postgres
  when: (pote_pg_db_check.stdout | trim) != '1'
  changed_when: true
- name: Ensure Python virtual environment exists
  ansible.builtin.command: "{{ pote_python_bin }} -m venv {{ pote_venv_dir }}"
  args:
    # `creates` makes this idempotent: skipped once the venv is in place.
    creates: "{{ pote_venv_dir }}/bin/activate"
  become: true
  become_user: "{{ pote_user }}"

- name: Upgrade pip in venv
  ansible.builtin.pip:
    name: pip
    state: present
    virtualenv: "{{ pote_venv_dir }}"
  become: true
  become_user: "{{ pote_user }}"

- name: Deploy POTE environment file
  ansible.builtin.template:
    src: env.j2
    dest: "{{ pote_env_file }}"
    owner: "{{ pote_user }}"
    group: "{{ pote_group }}"
    mode: "{{ pote_env_file_mode }}"

# Installs the checked-out repo itself (path install, editable).
- name: Install POTE in editable mode (pyproject.toml)
  ansible.builtin.pip:
    name: "{{ pote_app_dir }}"
    editable: true
    virtualenv: "{{ pote_venv_dir }}"
  become: true
  become_user: "{{ pote_user }}"

# changed_when: false — the task is reported "ok" even when migrations were
# actually applied; alembic output is not inspected here.
- name: Run Alembic migrations
  ansible.builtin.command: "{{ pote_venv_dir }}/bin/alembic upgrade head"
  args:
    chdir: "{{ pote_app_dir }}"
  become: true
  become_user: "{{ pote_user }}"
  changed_when: false

- name: Ensure logs directory exists
  ansible.builtin.file:
    path: "{{ pote_logs_dir }}"
    state: directory
    owner: "{{ pote_user }}"
    group: "{{ pote_group }}"
    mode: "0755"

# NOTE(review): assumes these scripts exist in the repo; a missing file fails
# the task — confirm the list matches the repository contents.
- name: Ensure automation shell scripts are executable
  ansible.builtin.file:
    path: "{{ pote_app_dir }}/scripts/{{ item }}"
    mode: "0755"
  loop:
    - automated_daily_run.sh
    - automated_weekly_run.sh
    - setup_cron.sh
    - setup_automation.sh
  become: true
  become_user: "{{ pote_user }}"
# Schedule strings are split positionally: "minute hour [weekday]".
- name: Install cron job - daily report
  ansible.builtin.cron:
    name: "POTE daily report"
    minute: "{{ pote_daily_report_time.split()[0] }}"
    hour: "{{ pote_daily_report_time.split()[1] }}"
    job: "{{ pote_daily_job }}"
    user: "{{ pote_user }}"
    state: present
  when:
    - pote_enable_cron | bool
    - pote_daily_report_enabled | bool

- name: Install cron job - weekly report
  ansible.builtin.cron:
    name: "POTE weekly report"
    minute: "{{ pote_weekly_report_time.split()[0] }}"
    hour: "{{ pote_weekly_report_time.split()[1] }}"
    weekday: "{{ pote_weekly_report_time.split()[2] }}"
    job: "{{ pote_weekly_job }}"
    user: "{{ pote_user }}"
    state: present
  when:
    - pote_enable_cron | bool
    - pote_weekly_report_enabled | bool

# hour may be a cron step expression such as "*/6" (see pote_health_check_time).
- name: Install cron job - health check
  ansible.builtin.cron:
    name: "POTE health check"
    minute: "{{ pote_health_check_time.split()[0] }}"
    hour: "{{ pote_health_check_time.split()[1] }}"
    job: "{{ pote_health_check_job }}"
    user: "{{ pote_user }}"
    state: present
  when:
    - pote_enable_cron | bool
    - pote_health_check_enabled | bool

View File

@ -0,0 +1,27 @@
{# Rendered to pote_env_file (default ~poteapp/pote/.env, mode 0600). #}
### Ansible-managed POTE environment
POTE_ENV="{{ pote_env }}"

# Database
DATABASE_URL="{{ pote_database_url }}"

# Email
SMTP_HOST="{{ pote_smtp_host }}"
SMTP_PORT="{{ pote_smtp_port }}"
SMTP_USER="{{ pote_smtp_user }}"
SMTP_PASSWORD="{{ pote_smtp_password }}"
FROM_EMAIL="{{ pote_from_email }}"
REPORT_RECIPIENTS="{{ pote_report_recipients }}"

# Monitoring / alerting (optional)
MARKET_MONITOR_TICKERS="{{ pote_market_tickers | default('') }}"
ALERT_MIN_SEVERITY="{{ pote_alert_min_severity | default('') }}"

# Logging
LOG_LEVEL="{{ pote_log_level }}"
LOG_FILE="{{ pote_log_file }}"

# Optional API keys
QUIVERQUANT_API_KEY="{{ pote_quiverquant_api_key | default('') }}"
FMP_API_KEY="{{ pote_fmp_api_key | default('') }}"

View File

@ -1,64 +1,82 @@
# Role: proxmox_vm
# Role: `proxmox_vm`
## Description
Creates and configures virtual machines on Proxmox VE hypervisor with cloud-init support and automated provisioning.
Provision Proxmox guests via API. This role supports **both**:
- **LXC containers** (`proxmox_guest_type: lxc`) via `community.proxmox.proxmox`
- **KVM VMs** (`proxmox_guest_type: kvm`) via `community.general.proxmox_kvm`
The entry point is `roles/proxmox_vm/tasks/main.yml`, which dispatches to `tasks/lxc.yml` or `tasks/kvm.yml`.
## Requirements
- Ansible 2.9+
- Proxmox VE server
- `community.general` collection
- Valid Proxmox credentials in vault
## Features
- Automated VM creation with cloud-init
- Configurable CPU, memory, and disk resources
- Network configuration with DHCP or static IP
- SSH key injection for passwordless access
- Ubuntu Server template support
- Ansible (project tested with modern Ansible; older 2.9-era setups may need adjustments)
- Proxmox VE API access
- Collections:
- `community.proxmox`
- `community.general` (for `proxmox_kvm`)
- Python lib on the control machine:
- `proxmoxer` (installed by `make bootstrap` / `requirements.txt`)
## Variables
## Authentication (vault-backed)
| Variable | Default | Description |
|----------|---------|-------------|
| `vm_memory` | `8192` | RAM allocation in MB |
| `vm_cores` | `2` | Number of CPU cores |
| `vm_disk_size` | `20G` | Disk size |
| `vm_iso` | `ubuntu-24.04-live-server-amd64.iso` | Installation ISO |
| `vm_ciuser` | `master` | Default cloud-init user |
| `vm_storage` | `local-lvm` | Proxmox storage backend |
Store secrets in `inventories/production/group_vars/all/vault.yml`:
## Vault Variables (Required)
- `vault_proxmox_host`
- `vault_proxmox_user`
- `vault_proxmox_password` (or token auth)
- `vault_proxmox_token_id` (optional)
- `vault_proxmox_token` (optional)
- `vault_ssh_public_key` (used for bootstrap access where applicable)
| Variable | Description |
|----------|-------------|
| `vault_proxmox_host` | Proxmox server IP/hostname |
| `vault_proxmox_user` | Proxmox username (e.g., root@pam) |
| `vault_proxmox_password` | Proxmox password |
| `vault_vm_cipassword` | VM default user password |
| `vault_ssh_public_key` | SSH public key for VM access |
## Key variables
## Dependencies
- Proxmox VE server with API access
- ISO images uploaded to Proxmox storage
Common:
## Example Playbook
- `proxmox_guest_type`: `lxc` or `kvm`
- `proxmox_host`, `proxmox_user`, `proxmox_node`
- `proxmox_api_port` (default `8006`)
- `proxmox_validate_certs` (default `false`)
LXC (`tasks/lxc.yml`):
- `lxc_vmid`, `lxc_hostname`
- `lxc_ostemplate` (e.g. `local:vztmpl/debian-12-standard_*.tar.zst`)
- `lxc_storage` (default `local-lvm`)
- `lxc_network_bridge` (default `vmbr0`)
- `lxc_ip` (CIDR), `lxc_gateway`
- `lxc_cores`, `lxc_memory_mb`, `lxc_swap_mb`, `lxc_rootfs_size_gb`
KVM (`tasks/kvm.yml`):
- `vm_id`, `vm_name`
- `vm_cores`, `vm_memory`, `vm_disk_size`
- `vm_storage`, `vm_network_bridge`
- cloud-init parameters used by the existing KVM provisioning flow
## Safety guardrails
LXC provisioning includes a VMID collision guardrail:
- If the target VMID already exists but the guest name does not match the expected name, provisioning fails.
- Override only if you really mean it: `-e allow_vmid_collision=true`
## Example usage
Provisioning is typically orchestrated by `playbooks/app/provision_vms.yml`, but you can call the role directly:
```yaml
- hosts: localhost
roles:
- role: proxmox_vm
vm_name: "test-vm"
vm_id: 999
vm_memory: 4096
```
## Tags
- `proxmox`: All Proxmox operations
- `vm`: VM creation tasks
- `infrastructure`: Infrastructure provisioning
## Notes
- Requires Proxmox API credentials in vault
- VM IDs must be unique on Proxmox cluster
- Cloud-init requires compatible ISO images
- VMs are created but not started by default
- name: Provision one LXC
hosts: localhost
connection: local
gather_facts: false
tasks:
- name: Create/update container
ansible.builtin.include_role:
name: proxmox_vm
vars:
proxmox_guest_type: lxc
lxc_vmid: 9301
lxc_hostname: projectA-dev
lxc_ip: "10.0.10.101/24"
lxc_gateway: "10.0.10.1"
```

View File

@ -25,3 +25,31 @@ vm_nameservers: "8.8.8.8 8.8.4.4"
vm_start_after_create: true
vm_enable_agent: true
vm_boot_order: "order=scsi0"
# -----------------------------------------------------------------------------
# Proxmox LXC defaults (used when proxmox_guest_type == 'lxc')
# -----------------------------------------------------------------------------
# Fallback identity; callers are expected to override vmid/hostname per guest.
lxc_vmid: 300
lxc_hostname: "app-container"
# Container template volume on the Proxmox node (storage:vztmpl/<archive>).
lxc_ostemplate: "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
# Storage pool backing the container rootfs.
lxc_storage: "local-lvm"
lxc_network_bridge: "vmbr0"
# Empty string means "unset": tasks/lxc.yml only appends ip=/gw= fragments to
# net0 when these are non-empty.
lxc_ip: "" # e.g. "10.0.10.101/24"
lxc_gateway: "" # e.g. "10.0.10.1"
lxc_nameserver: "1.1.1.1 8.8.8.8"
lxc_unprivileged: true
# Use list form because community.proxmox.proxmox expects list for `features`
lxc_features_list:
- "keyctl=1"
- "nesting=1"
# Compute / memory sizing defaults.
lxc_cores: 2
lxc_memory_mb: 2048
lxc_swap_mb: 512
lxc_rootfs_size_gb: 16
# Add to /root/.ssh/authorized_keys (bootstrap). Override with appuser_ssh_public_key.
lxc_pubkey: ""
# Start the container right after create/update (consumed by tasks/lxc.yml).
lxc_start_after_create: true

View File

@ -0,0 +1,82 @@
---
# Proxmox QEMU VM provisioning via API (cloud-init).
# This task file preserves the repo's existing VM behavior.
#
# NOTE(review): leading indentation was lost in the rendered diff; canonical
# two-space YAML nesting restored here. Module parameters and values unchanged.

# Break down the Proxmox VM creation to avoid "file name too long" error:
# the disk/net/ide/ipconfig option strings are precomputed as facts instead
# of being built inline in the module call.
- name: Set VM configuration facts
  ansible.builtin.set_fact:
    # Primary disk: <storage>:<size>, raw format.
    vm_scsi_config:
      scsi0: "{{ vm_storage }}:{{ vm_disk_size }},format=raw"
    # virtio NIC on the configured bridge, Proxmox firewall enabled.
    vm_net_config:
      net0: "virtio,bridge={{ vm_network_bridge }},firewall=1"
    # Cloud-init drive on ide2.
    vm_ide_config:
      ide2: "{{ vm_iso_storage }}:cloudinit,format=qcow2"
    # Cloud-init network string, e.g. "ip=10.0.0.5/24,gw=10.0.0.1".
    vm_ipconfig:
      ipconfig0: "{{ vm_ip_config }}"

- name: Create VM on Proxmox
  community.general.proxmox_kvm:
    # Connection
    api_host: "{{ proxmox_host }}"
    api_user: "{{ proxmox_user }}"
    api_password: "{{ vault_proxmox_password }}"
    # Token auth is optional; omitted entirely when the vars are unset.
    api_token_id: "{{ proxmox_token_id | default(omit) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
    # VM identification
    vmid: "{{ vm_id }}"
    name: "{{ vm_name }}"
    node: "{{ proxmox_node }}"
    # Hardware specs
    memory: "{{ vm_memory }}"
    cores: "{{ vm_cores }}"
    sockets: "{{ vm_sockets }}"
    cpu: "host"
    # Storage and network
    scsi: "{{ vm_scsi_config }}"
    net: "{{ vm_net_config }}"
    ide: "{{ vm_ide_config }}"
    # Boot and OS
    boot: "{{ vm_boot_order }}"
    ostype: "{{ vm_os_type }}"
    # Cloud-init
    ciuser: "{{ vm_ciuser }}"
    cipassword: "{{ vault_vm_cipassword | default(omit) }}"
    # Keys joined with newlines; omitted when the list is empty.
    # NOTE(review): assumes vm_ssh_keys is always defined (possibly empty) —
    # an undefined var here raises a template error; confirm role defaults.
    sshkeys: "{{ vm_ssh_keys | join('\n') if vm_ssh_keys else omit }}"
    ipconfig: "{{ vm_ipconfig }}"
    nameserver: "{{ vm_nameservers }}"
    # VM options
    agent: "{{ vm_enable_agent | bool }}"
    autostart: false  # do not auto-start on Proxmox host boot
    balloon: 0        # memory ballooning disabled
    state: present
  register: vm_creation_result

# Starting is a separate task so `state: present` above stays purely idempotent.
- name: Start VM if requested
  community.general.proxmox_kvm:
    api_host: "{{ proxmox_host }}"
    api_user: "{{ proxmox_user }}"
    api_password: "{{ vault_proxmox_password }}"
    api_token_id: "{{ proxmox_token_id | default(omit) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
    vmid: "{{ vm_id }}"
    node: "{{ proxmox_node }}"
    state: started
  when: vm_start_after_create | bool

- name: Display VM creation results
  ansible.builtin.debug:
    msg: |
      VM Created: {{ vm_name }} (ID: {{ vm_id }})
      Memory: {{ vm_memory }}MB
      Cores: {{ vm_cores }}
      Storage: {{ vm_storage }}:{{ vm_disk_size }}
      Network: {{ vm_network_bridge }}
      Status: {{ vm_creation_result.msg | default('Created') }}

View File

@ -0,0 +1,82 @@
---
# Proxmox LXC container provisioning via API.
#
# This uses `community.proxmox.proxmox` because it is widely available and
# supports idempotent updates via `update: true`.
#
# NOTE(review): leading indentation was lost in the rendered diff; canonical
# two-space YAML nesting restored here. Module parameters and values unchanged.

- name: Build LXC netif configuration
  ansible.builtin.set_fact:
    lxc_netif_config:
      # IMPORTANT: Proxmox requires net0 to be a single comma-delimited string.
      # Avoid folded YAML blocks here (they can introduce newlines/spaces).
      # ip=/gw= fragments are appended only when the vars are non-empty, so an
      # unset lxc_ip/lxc_gateway simply leaves those options out of net0.
      net0: >-
        {{
          (
            ['name=eth0', 'bridge=' ~ lxc_network_bridge, 'firewall=1']
            + (['ip=' ~ lxc_ip] if (lxc_ip is defined and (lxc_ip | string | length) > 0) else [])
            + (['gw=' ~ lxc_gateway] if (lxc_gateway is defined and (lxc_gateway | string | length) > 0) else [])
          ) | join(',')
        }}

- name: Ensure LXC container is present (create or update)
  community.proxmox.proxmox:
    api_host: "{{ proxmox_host }}"
    api_port: "{{ proxmox_api_port | default(8006) }}"
    validate_certs: "{{ proxmox_validate_certs | default(false) }}"
    api_user: "{{ proxmox_user }}"
    api_password: "{{ vault_proxmox_password | default(omit) }}"
    # Only pass token params when they are set (avoid empty-string triggering
    # required-together errors); default(omit, true) also treats "" as unset.
    api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
    node: "{{ proxmox_node }}"
    vmid: "{{ lxc_vmid | default(omit) }}"
    hostname: "{{ lxc_hostname }}"
    ostemplate: "{{ lxc_ostemplate }}"
    unprivileged: "{{ lxc_unprivileged | bool }}"
    features: "{{ lxc_features_list | default(omit) }}"
    cores: "{{ lxc_cores }}"
    memory: "{{ lxc_memory_mb }}"
    swap: "{{ lxc_swap_mb }}"
    # rootfs sizing (GiB). disk_volume is less version-sensitive than string `disk`.
    disk_volume:
      storage: "{{ lxc_storage }}"
      size: "{{ lxc_rootfs_size_gb }}"
    netif: "{{ lxc_netif_config }}"
    nameserver: "{{ lxc_nameserver | default(omit) }}"
    # Bootstrap root SSH access (used by Ansible until appuser exists).
    pubkey: "{{ lxc_pubkey | default(omit) }}"
    password: "{{ vault_lxc_root_password | default(omit) }}"
    update: true
    state: present
  register: lxc_present

# Starting is a separate task so `state: present` above stays purely idempotent.
- name: Ensure LXC container is started
  community.proxmox.proxmox:
    api_host: "{{ proxmox_host }}"
    api_port: "{{ proxmox_api_port | default(8006) }}"
    validate_certs: "{{ proxmox_validate_certs | default(false) }}"
    api_user: "{{ proxmox_user }}"
    api_password: "{{ vault_proxmox_password | default(omit) }}"
    api_token_id: "{{ proxmox_token_id | default(omit, true) }}"
    api_token_secret: "{{ vault_proxmox_token | default(omit, true) }}"
    node: "{{ proxmox_node }}"
    vmid: "{{ lxc_vmid | default(omit) }}"
    state: started
  when: lxc_start_after_create | bool

- name: Display LXC provisioning results
  ansible.builtin.debug:
    msg: |
      LXC Present: {{ lxc_hostname }} (VMID: {{ lxc_vmid }})
      Cores: {{ lxc_cores }}
      Memory: {{ lxc_memory_mb }}MB (swap {{ lxc_swap_mb }}MB)
      RootFS: {{ lxc_storage }}:{{ lxc_rootfs_size_gb }}
      Net: {{ lxc_network_bridge }} / {{ lxc_ip | default('dhcp/unspecified') }}
      Changed: {{ lxc_present.changed | default(false) }}

View File

@ -1,77 +1,13 @@
---
# Break down the Proxmox VM creation to avoid "file name too long" error
- name: Set VM configuration facts
ansible.builtin.set_fact:
vm_scsi_config:
scsi0: "{{ vm_storage }}:{{ vm_disk_size }},format=raw"
vm_net_config:
net0: "virtio,bridge={{ vm_network_bridge }},firewall=1"
vm_ide_config:
ide2: "{{ vm_iso_storage }}:cloudinit,format=qcow2"
vm_ipconfig:
ipconfig0: "{{ vm_ip_config }}"
# Proxmox guest provisioning dispatcher.
#
# - `proxmox_guest_type: lxc` uses `tasks/lxc.yml`
# - default uses `tasks/kvm.yml` (existing behavior)
- name: Create VM on Proxmox
community.general.proxmox_kvm:
# Connection
api_host: "{{ proxmox_host }}"
api_user: "{{ proxmox_user }}"
api_password: "{{ vault_proxmox_password }}"
api_token_id: "{{ proxmox_token_id | default(omit) }}"
api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
- name: Provision LXC container
ansible.builtin.include_tasks: lxc.yml
when: (proxmox_guest_type | default('kvm')) == 'lxc'
# VM identification
vmid: "{{ vm_id }}"
name: "{{ vm_name }}"
node: "{{ proxmox_node }}"
# Hardware specs
memory: "{{ vm_memory }}"
cores: "{{ vm_cores }}"
sockets: "{{ vm_sockets }}"
cpu: "host"
# Storage and network
scsi: "{{ vm_scsi_config }}"
net: "{{ vm_net_config }}"
ide: "{{ vm_ide_config }}"
# Boot and OS
boot: "{{ vm_boot_order }}"
ostype: "{{ vm_os_type }}"
# Cloud-init
ciuser: "{{ vm_ciuser }}"
cipassword: "{{ vault_vm_cipassword | default(omit) }}"
sshkeys: "{{ vm_ssh_keys | join('\n') if vm_ssh_keys else omit }}"
ipconfig: "{{ vm_ipconfig }}"
nameserver: "{{ vm_nameservers }}"
# VM options
agent: "{{ vm_enable_agent | bool }}"
autostart: false
balloon: 0
state: present
register: vm_creation_result
- name: Start VM if requested
community.general.proxmox_kvm:
api_host: "{{ proxmox_host }}"
api_user: "{{ proxmox_user }}"
api_password: "{{ vault_proxmox_password }}"
api_token_id: "{{ proxmox_token_id | default(omit) }}"
api_token_secret: "{{ vault_proxmox_token | default(omit) }}"
vmid: "{{ vm_id }}"
node: "{{ proxmox_node }}"
state: started
when: vm_start_after_create | bool
- name: Display VM creation results
ansible.builtin.debug:
msg: |
VM Created: {{ vm_name }} (ID: {{ vm_id }})
Memory: {{ vm_memory }}MB
Cores: {{ vm_cores }}
Storage: {{ vm_storage }}:{{ vm_disk_size }}
Network: {{ vm_network_bridge }}
Status: {{ vm_creation_result.msg | default('Created') }}
- name: Provision QEMU VM (cloud-init)
ansible.builtin.include_tasks: kvm.yml
when: (proxmox_guest_type | default('kvm')) != 'lxc'

11
site.yml Normal file
View File

@ -0,0 +1,11 @@
---
# Wrapper playbook
# Purpose: allow running from repo root:
#   ansible-playbook -i inventories/production site.yml
#
# This delegates to the main site playbook under playbooks/.
# NOTE(review): import_playbook re-indented under its play entry (indentation
# was flattened in the rendered diff).
- name: Main site
  import_playbook: playbooks/site.yml