Add Tailscale role and playbook for VPN setup across all machines. Update inventory to include Tailscale hosts and enhance Makefile with Tailscale-specific commands. Introduce documentation for Tailscale setup and Ansible Vault usage to securely manage authentication keys. Ensure compatibility with Debian, Ubuntu, and Alpine systems through role-specific tasks.
This commit is contained in:
parent
f85945c8f7
commit
00d660201a
181
Makefile
181
Makefile
@ -1,4 +1,4 @@
|
|||||||
.PHONY: help bootstrap lint test check apply dev local clean status
|
.PHONY: help bootstrap lint test check apply dev local clean status tailscale tailscale-check tailscale-dev tailscale-status create-vault
|
||||||
.DEFAULT_GOAL := help
|
.DEFAULT_GOAL := help
|
||||||
|
|
||||||
## Colors for output
|
## Colors for output
|
||||||
@ -9,6 +9,11 @@ YELLOW := \033[33m
|
|||||||
BLUE := \033[34m
|
BLUE := \033[34m
|
||||||
RESET := \033[0m
|
RESET := \033[0m
|
||||||
|
|
||||||
|
## Auto-detect current host to exclude from remote operations
|
||||||
|
CURRENT_IP := $(shell hostname -I | awk '{print $$1}')
|
||||||
|
CURRENT_HOST := $(shell ansible-inventory --list | jq -r '._meta.hostvars | to_entries[] | select(.value.ansible_host == "$(CURRENT_IP)") | .key' 2>/dev/null | head -1)
|
||||||
|
EXCLUDE_CURRENT := $(if $(CURRENT_HOST),--limit '!$(CURRENT_HOST)',)
|
||||||
|
|
||||||
help: ## Show this help message
|
help: ## Show this help message
|
||||||
@echo "$(BOLD)Ansible Development Environment$(RESET)"
|
@echo "$(BOLD)Ansible Development Environment$(RESET)"
|
||||||
@echo ""
|
@echo ""
|
||||||
@ -28,21 +33,50 @@ help: ## Show this help message
|
|||||||
@echo ""
|
@echo ""
|
||||||
|
|
||||||
bootstrap: ## Install required collections and dependencies
|
bootstrap: ## Install required collections and dependencies
|
||||||
@echo "$(YELLOW)Installing Ansible collections...$(RESET)"
|
@echo "$(BOLD)Installing Dependencies$(RESET)"
|
||||||
ansible-galaxy collection install -r collections/requirements.yml
|
@echo ""
|
||||||
@echo "$(GREEN)✓ Collections installed$(RESET)"
|
@echo "$(YELLOW)Ansible Collections:$(RESET)"
|
||||||
|
@ansible-galaxy collection install -r collections/requirements.yml 2>&1 | grep -E "(Installing|Skipping|ERROR)" | while read line; do \
|
||||||
|
if echo "$$line" | grep -q "Installing"; then \
|
||||||
|
collection=$$(echo "$$line" | awk '{print $$2}' | sed 's/:.*//'); \
|
||||||
|
printf " $(GREEN)✓ %-30s$(RESET) Installed\n" "$$collection"; \
|
||||||
|
elif echo "$$line" | grep -q "Skipping"; then \
|
||||||
|
collection=$$(echo "$$line" | awk '{print $$2}' | sed 's/,.*//'); \
|
||||||
|
printf " $(BLUE)- %-30s$(RESET) Already installed\n" "$$collection"; \
|
||||||
|
elif echo "$$line" | grep -q "ERROR"; then \
|
||||||
|
printf " $(RED)✗ Error: $$line$(RESET)\n"; \
|
||||||
|
fi; \
|
||||||
|
done || ansible-galaxy collection install -r collections/requirements.yml
|
||||||
|
@echo ""
|
||||||
|
|
||||||
lint: ## Run ansible-lint on all playbooks and roles
|
lint: ## Run ansible-lint on all playbooks and roles
|
||||||
@echo "$(YELLOW)Running ansible-lint...$(RESET)"
|
@echo "$(YELLOW)Running ansible-lint...$(RESET)"
|
||||||
ansible-lint
|
ansible-lint
|
||||||
@echo "$(GREEN)✓ Linting completed$(RESET)"
|
@echo "$(GREEN)✓ Linting completed$(RESET)"
|
||||||
|
|
||||||
test: lint ## Run all tests (lint + syntax check)
|
test-syntax: ## Run syntax check only
|
||||||
@echo "$(YELLOW)Testing playbook syntax...$(RESET)"
|
@echo "$(BOLD)Syntax Testing$(RESET)"
|
||||||
ansible-playbook dev-playbook.yml --syntax-check
|
@echo ""
|
||||||
ansible-playbook local-playbook.yml --syntax-check
|
@echo "$(YELLOW)Playbook Syntax:$(RESET)"
|
||||||
ansible-playbook maintenance-playbook.yml --syntax-check
|
@for playbook in dev-playbook.yml local-playbook.yml maintenance-playbook.yml tailscale-playbook.yml; do \
|
||||||
@echo "$(GREEN)✓ Syntax check passed$(RESET)"
|
if [ -f "$$playbook" ]; then \
|
||||||
|
printf " %-25s " "$$playbook"; \
|
||||||
|
if ansible-playbook "$$playbook" --syntax-check >/dev/null 2>&1; then \
|
||||||
|
echo "$(GREEN)✓ OK$(RESET)"; \
|
||||||
|
else \
|
||||||
|
echo "$(RED)✗ FAIL$(RESET)"; \
|
||||||
|
fi; \
|
||||||
|
fi; \
|
||||||
|
done
|
||||||
|
@echo ""
|
||||||
|
|
||||||
|
test: ## Run all tests (lint + syntax check if available)
|
||||||
|
@if command -v ansible-lint >/dev/null 2>&1; then \
|
||||||
|
echo "$(YELLOW)Running ansible-lint...$(RESET)"; \
|
||||||
|
ansible-lint 2>/dev/null && echo "$(GREEN)✓ Linting completed$(RESET)" || echo "$(YELLOW)⚠ Linting had issues$(RESET)"; \
|
||||||
|
echo ""; \
|
||||||
|
fi
|
||||||
|
@$(MAKE) test-syntax
|
||||||
|
|
||||||
check: ## Dry-run the development playbook (--check mode)
|
check: ## Dry-run the development playbook (--check mode)
|
||||||
@echo "$(YELLOW)Running dry-run on development hosts...$(RESET)"
|
@echo "$(YELLOW)Running dry-run on development hosts...$(RESET)"
|
||||||
@ -170,20 +204,36 @@ else ifdef GROUP
|
|||||||
done
|
done
|
||||||
else
|
else
|
||||||
@echo "$(YELLOW)Pinging all hosts...$(RESET)"
|
@echo "$(YELLOW)Pinging all hosts...$(RESET)"
|
||||||
@ansible all -m ping --one-line | while read line; do \
|
@if [ -n "$(CURRENT_HOST)" ]; then \
|
||||||
|
echo "$(BLUE)Auto-excluding current host: $(CURRENT_HOST) ($(CURRENT_IP))$(RESET)"; \
|
||||||
|
fi
|
||||||
|
@echo ""
|
||||||
|
@ansible all -m ping --one-line $(EXCLUDE_CURRENT) 2>/dev/null | grep -E "(SUCCESS|UNREACHABLE)" > /tmp/ping_results.tmp; \
|
||||||
|
success_count=$$(grep -c "SUCCESS" /tmp/ping_results.tmp 2>/dev/null || echo 0); \
|
||||||
|
fail_count=$$(grep -c "UNREACHABLE" /tmp/ping_results.tmp 2>/dev/null || echo 0); \
|
||||||
|
while read line; do \
|
||||||
|
host=$$(echo "$$line" | cut -d' ' -f1); \
|
||||||
if echo "$$line" | grep -q "SUCCESS"; then \
|
if echo "$$line" | grep -q "SUCCESS"; then \
|
||||||
echo "$(GREEN)✓ $$line$(RESET)"; \
|
printf "$(GREEN) ✓ %-20s$(RESET) Connected\n" "$$host"; \
|
||||||
elif echo "$$line" | grep -q "UNREACHABLE"; then \
|
elif echo "$$line" | grep -q "UNREACHABLE"; then \
|
||||||
echo "$(RED)✗ $$line$(RESET)"; \
|
reason=$$(echo "$$line" | grep -o "Permission denied.*" | head -1 || echo "Connection failed"); \
|
||||||
else \
|
printf "$(RED) ✗ %-20s$(RESET) $$reason\n" "$$host"; \
|
||||||
echo "$(YELLOW)? $$line$(RESET)"; \
|
|
||||||
fi; \
|
fi; \
|
||||||
done
|
done < /tmp/ping_results.tmp; \
|
||||||
|
rm -f /tmp/ping_results.tmp; \
|
||||||
|
echo ""; \
|
||||||
|
printf "$(BOLD)Summary:$(RESET) $(GREEN)$$success_count connected$(RESET), $(RED)$$fail_count failed$(RESET)\n"
|
||||||
endif
|
endif
|
||||||
|
|
||||||
facts: ## Gather facts from all hosts
|
facts: ## Gather facts from all hosts
|
||||||
@echo "$(YELLOW)Gathering facts...$(RESET)"
|
@echo "$(YELLOW)Gathering facts...$(RESET)"
|
||||||
ansible all -m setup --tree /tmp/facts
|
ansible all -m setup --tree /tmp/facts $(EXCLUDE_CURRENT)
|
||||||
|
|
||||||
|
show-current: ## Show current host that will be auto-excluded
|
||||||
|
@echo "$(BOLD)Current Host Detection:$(RESET)"
|
||||||
|
@echo " Current IP: $(BLUE)$(CURRENT_IP)$(RESET)"
|
||||||
|
@echo " Current Host: $(BLUE)$(CURRENT_HOST)$(RESET)"
|
||||||
|
@echo " Exclude Flag: $(BLUE)$(EXCLUDE_CURRENT)$(RESET)"
|
||||||
|
|
||||||
clean: ## Clean up ansible artifacts
|
clean: ## Clean up ansible artifacts
|
||||||
@echo "$(YELLOW)Cleaning up artifacts...$(RESET)"
|
@echo "$(YELLOW)Cleaning up artifacts...$(RESET)"
|
||||||
@ -204,6 +254,47 @@ verbose: ## Run with verbose output
|
|||||||
quick: test check ## Quick test and check before applying
|
quick: test check ## Quick test and check before applying
|
||||||
@echo "$(GREEN)✓ Ready to apply changes$(RESET)"
|
@echo "$(GREEN)✓ Ready to apply changes$(RESET)"
|
||||||
|
|
||||||
|
# Tailscale management
|
||||||
|
tailscale: ## Install Tailscale on all machines
|
||||||
|
@echo "$(YELLOW)Installing Tailscale on all machines...$(RESET)"
|
||||||
|
ansible-playbook -i hosts tailscale-playbook.yml
|
||||||
|
@echo "$(GREEN)✓ Tailscale installation complete$(RESET)"
|
||||||
|
|
||||||
|
tailscale-check: ## Check Tailscale installation (dry-run)
|
||||||
|
@echo "$(YELLOW)Checking Tailscale installation...$(RESET)"
|
||||||
|
ansible-playbook -i hosts tailscale-playbook.yml --check --diff
|
||||||
|
@echo "$(GREEN)✓ Tailscale check complete$(RESET)"
|
||||||
|
|
||||||
|
tailscale-dev: ## Install Tailscale on dev machines only
|
||||||
|
@echo "$(YELLOW)Installing Tailscale on dev machines...$(RESET)"
|
||||||
|
ansible-playbook -i hosts tailscale-playbook.yml --limit dev
|
||||||
|
@echo "$(GREEN)✓ Tailscale installation on dev machines complete$(RESET)"
|
||||||
|
|
||||||
|
tailscale-status: ## Check Tailscale status on all machines
|
||||||
|
@echo "$(BOLD)Tailscale Status$(RESET)"
|
||||||
|
@if [ -n "$(CURRENT_HOST)" ]; then \
|
||||||
|
echo "$(BLUE)Auto-excluding current host: $(CURRENT_HOST) ($(CURRENT_IP))$(RESET)"; \
|
||||||
|
fi
|
||||||
|
@echo ""
|
||||||
|
@ansible all -i hosts -m shell -a "tailscale status --json | jq -r '.Self.DNSName + \" (\" + .Self.TailscaleIPs[0] + \") - \" + .BackendState'" --become $(EXCLUDE_CURRENT) 2>/dev/null | while read line; do \
|
||||||
|
host=$$(echo "$$line" | cut -d' ' -f1); \
|
||||||
|
status=$$(echo "$$line" | grep -o "Running\|Stopped\|NeedsLogin" || echo "Unknown"); \
|
||||||
|
ip=$$(echo "$$line" | grep -o "100\.[0-9.]*" || echo "No IP"); \
|
||||||
|
if echo "$$line" | grep -q "SUCCESS"; then \
|
||||||
|
result=$$(echo "$$line" | cut -d'>' -f2 | tr -d ' "'); \
|
||||||
|
printf " $(GREEN)✓ %-20s$(RESET) %s\n" "$$host" "$$result"; \
|
||||||
|
elif echo "$$line" | grep -q "FAILED\|UNREACHABLE"; then \
|
||||||
|
printf " $(RED)✗ %-20s$(RESET) Not available\n" "$$host"; \
|
||||||
|
fi; \
|
||||||
|
done || ansible all -i hosts -m shell -a "tailscale status" --become $(EXCLUDE_CURRENT) 2>/dev/null | grep -E "(SUCCESS|FAILED)" | while read line; do \
|
||||||
|
host=$$(echo "$$line" | cut -d' ' -f1); \
|
||||||
|
if echo "$$line" | grep -q "SUCCESS"; then \
|
||||||
|
printf " $(GREEN)✓ %-20s$(RESET) Connected\n" "$$host"; \
|
||||||
|
else \
|
||||||
|
printf " $(RED)✗ %-20s$(RESET) Failed\n" "$$host"; \
|
||||||
|
fi; \
|
||||||
|
done
|
||||||
|
|
||||||
# Vault management
|
# Vault management
|
||||||
edit-vault: ## Edit encrypted host vars (usage: make edit-vault HOST=dev01)
|
edit-vault: ## Edit encrypted host vars (usage: make edit-vault HOST=dev01)
|
||||||
ifndef HOST
|
ifndef HOST
|
||||||
@ -216,49 +307,42 @@ endif
|
|||||||
|
|
||||||
|
|
||||||
test-connectivity: ## Test network connectivity and SSH access to all hosts
|
test-connectivity: ## Test network connectivity and SSH access to all hosts
|
||||||
@echo "$(BOLD)Testing Connectivity to All Hosts$(RESET)"
|
@echo "$(BOLD)Connectivity Test$(RESET)"
|
||||||
|
@if [ -n "$(CURRENT_HOST)" ]; then \
|
||||||
|
echo "$(BLUE)Auto-excluding current host: $(CURRENT_HOST) ($(CURRENT_IP))$(RESET)"; \
|
||||||
|
fi
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "$(YELLOW)1. Testing network connectivity (ping)...$(RESET)"
|
@echo "$(YELLOW)Network Connectivity:$(RESET)"
|
||||||
@for host in giteaVM portainerVM homepageVM dev01 bottom debianDesktopVM; do \
|
@ansible-inventory --list | jq -r '._meta.hostvars | to_entries[] | select(.value.ansible_host) | "\(.key) \(.value.ansible_host)"' 2>/dev/null | while read host ip; do \
|
||||||
ip=$$(ansible-inventory --list | jq -r ".$$host.ansible_host // empty" 2>/dev/null || echo "unknown"); \
|
if [ "$$host" != "$(CURRENT_HOST)" ]; then \
|
||||||
if [ "$$ip" != "unknown" ] && [ "$$ip" != "null" ] && [ "$$ip" != "" ]; then \
|
printf " %-20s " "$$host ($$ip)"; \
|
||||||
echo -n " $$host ($$ip): "; \
|
|
||||||
if ping -c 1 -W 2 $$ip >/dev/null 2>&1; then \
|
if ping -c 1 -W 2 $$ip >/dev/null 2>&1; then \
|
||||||
echo "$(GREEN)✓ Network OK$(RESET)"; \
|
echo "$(GREEN)✓ Network OK$(RESET)"; \
|
||||||
else \
|
else \
|
||||||
echo "$(RED)✗ Network FAIL$(RESET)"; \
|
echo "$(RED)✗ Network FAIL$(RESET)"; \
|
||||||
fi; \
|
fi; \
|
||||||
else \
|
|
||||||
echo " $$host: $(YELLOW)? IP not found in inventory$(RESET)"; \
|
|
||||||
fi; \
|
fi; \
|
||||||
done
|
done
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "$(YELLOW)2. Testing SSH connectivity...$(RESET)"
|
@echo "$(YELLOW)SSH Connectivity:$(RESET)"
|
||||||
@ansible all -m ping --one-line 2>/dev/null | while read line; do \
|
@ansible all -m ping --one-line $(EXCLUDE_CURRENT) 2>/dev/null | grep -E "(SUCCESS|UNREACHABLE)" | while read line; do \
|
||||||
|
host=$$(echo "$$line" | cut -d' ' -f1); \
|
||||||
if echo "$$line" | grep -q "SUCCESS"; then \
|
if echo "$$line" | grep -q "SUCCESS"; then \
|
||||||
echo " $(GREEN)✓ $$line$(RESET)"; \
|
printf " $(GREEN)✓ %-20s$(RESET) SSH OK\n" "$$host"; \
|
||||||
elif echo "$$line" | grep -q "UNREACHABLE"; then \
|
elif echo "$$line" | grep -q "UNREACHABLE"; then \
|
||||||
echo " $(RED)✗ $$line$(RESET)"; \
|
printf " $(RED)✗ %-20s$(RESET) SSH FAIL\n" "$$host"; \
|
||||||
else \
|
|
||||||
echo " $(YELLOW)? $$line$(RESET)"; \
|
|
||||||
fi; \
|
fi; \
|
||||||
done || true
|
done
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "$(YELLOW)3. SSH key status...$(RESET)"
|
@echo "$(YELLOW)SSH Keys:$(RESET)"
|
||||||
@if [ -f ~/.ssh/id_rsa.pub ]; then \
|
@if [ -f ~/.ssh/id_ed25519.pub ]; then \
|
||||||
echo " $(GREEN)✓ SSH public key found: ~/.ssh/id_rsa.pub$(RESET)"; \
|
echo " $(GREEN)✓ SSH key available$(RESET) (id_ed25519)"; \
|
||||||
elif [ -f ~/.ssh/id_ed25519.pub ]; then \
|
elif [ -f ~/.ssh/id_rsa.pub ]; then \
|
||||||
echo " $(GREEN)✓ SSH public key found: ~/.ssh/id_ed25519.pub$(RESET)"; \
|
echo " $(GREEN)✓ SSH key available$(RESET) (id_rsa)"; \
|
||||||
else \
|
else \
|
||||||
echo " $(RED)✗ No SSH public key found$(RESET)"; \
|
echo " $(RED)✗ No SSH key found$(RESET)"; \
|
||||||
echo " $(YELLOW) Run: ssh-keygen -t ed25519 -C 'your_email@example.com'$(RESET)"; \
|
echo " $(YELLOW) Run: ssh-keygen -t ed25519$(RESET)"; \
|
||||||
fi
|
fi
|
||||||
@echo ""
|
|
||||||
@echo "$(BOLD)Troubleshooting Tips:$(RESET)"
|
|
||||||
@echo " • For network failures: Check if VMs are running and IPs are correct"
|
|
||||||
@echo " • For SSH failures: Copy your SSH key to the target hosts"
|
|
||||||
@echo " • Run: ssh-copy-id user@host (for each failing host)"
|
|
||||||
@echo " • Or: make copy-ssh-key HOST=hostname"
|
|
||||||
|
|
||||||
copy-ssh-key: ## Copy SSH key to specific host (usage: make copy-ssh-key HOST=giteaVM)
|
copy-ssh-key: ## Copy SSH key to specific host (usage: make copy-ssh-key HOST=giteaVM)
|
||||||
ifndef HOST
|
ifndef HOST
|
||||||
@ -275,4 +359,9 @@ endif
|
|||||||
else \
|
else \
|
||||||
echo "$(RED)Could not determine IP or user for $(HOST)$(RESET)"; \
|
echo "$(RED)Could not determine IP or user for $(HOST)$(RESET)"; \
|
||||||
echo "Check your inventory and host_vars"; \
|
echo "Check your inventory and host_vars"; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
create-vault: ## Create encrypted vault file for secrets (passwords, auth keys, etc.)
|
||||||
|
@echo "$(YELLOW)Creating vault file for storing secrets...$(RESET)"
|
||||||
|
ansible-vault create group_vars/all/vault.yml
|
||||||
|
@echo "$(GREEN)✓ Vault file created. Add your secrets here (e.g. vault_tailscale_auth_key)$(RESET)"
|
||||||
61
SIMPLE_SETUP.md
Normal file
61
SIMPLE_SETUP.md
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
# Simple Tailscale Setup
|
||||||
|
|
||||||
|
## What you need:
|
||||||
|
1. A Tailscale account (free at https://tailscale.com)
|
||||||
|
2. An auth key from your Tailscale admin console
|
||||||
|
|
||||||
|
## 3-Step Setup:
|
||||||
|
|
||||||
|
### Step 1: Get your auth key
|
||||||
|
1. Go to https://login.tailscale.com/admin/settings/keys
|
||||||
|
2. Click "Generate auth key"
|
||||||
|
3. Make it **Reusable** and set expiration to **90 days** (or longer)
|
||||||
|
4. Copy the key (starts with `tskey-auth-`)
|
||||||
|
|
||||||
|
### Step 2: Store the key securely
|
||||||
|
```bash
|
||||||
|
make create-vault
|
||||||
|
```
|
||||||
|
When prompted, add this content:
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
vault_tailscale_auth_key: "tskey-auth-your-actual-key-here"
|
||||||
|
```
|
||||||
|
Save and exit.
|
||||||
|
|
||||||
|
### Step 3: Install Tailscale everywhere
|
||||||
|
```bash
|
||||||
|
# Check what will happen (dry run)
|
||||||
|
make tailscale-check
|
||||||
|
|
||||||
|
# Install on all machines
|
||||||
|
make tailscale
|
||||||
|
```
|
||||||
|
|
||||||
|
That's it! Your machines should now be connected to your Tailscale network.
|
||||||
|
|
||||||
|
## Check if it worked:
|
||||||
|
```bash
|
||||||
|
make tailscale-status
|
||||||
|
```
|
||||||
|
|
||||||
|
## How the vault connects to your settings:
|
||||||
|
|
||||||
|
The `group_vars/all.yml` file now contains:
|
||||||
|
```yaml
|
||||||
|
tailscale_auth_key: "{{ vault_tailscale_auth_key | default('') }}"
|
||||||
|
```
|
||||||
|
|
||||||
|
This tells Ansible: "Look for `vault_tailscale_auth_key` in the encrypted vault file, and if it's not there, use an empty string."
|
||||||
|
|
||||||
|
So when you put your real auth key in the vault, it automatically gets used!
|
||||||
|
|
||||||
|
## The confusing variables explained:
|
||||||
|
|
||||||
|
- `tailscale_auth_key`: **YOU NEED THIS** - your authentication key
|
||||||
|
- `tailscale_ssh`: **USEFUL** - lets you SSH through Tailscale network
|
||||||
|
- `tailscale_accept_routes`: **USEFUL** - access other networks through Tailscale
|
||||||
|
- `tailscale_hostname`: **OPTIONAL** - custom name (defaults to your server names)
|
||||||
|
- `tailscale_advertise_routes`: **ADVANCED** - share your local network with others
|
||||||
|
- `tailscale_shields_up`: **SECURITY** - blocks incoming connections
|
||||||
|
- Everything else: **IGNORE** unless you have specific enterprise needs
|
||||||
108
TAILSCALE_SETUP.md
Normal file
108
TAILSCALE_SETUP.md
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
# Tailscale Setup Guide
|
||||||
|
|
||||||
|
This guide will help you deploy Tailscale across all your machines using Ansible.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
1. **Get your Tailscale auth key**:
|
||||||
|
- Go to https://login.tailscale.com/admin/settings/keys
|
||||||
|
- Generate a new auth key (preferably reusable and non-expiring for automation)
|
||||||
|
|
||||||
|
2. **Store the auth key securely**:
|
||||||
|
```bash
|
||||||
|
make create-vault
|
||||||
|
```
|
||||||
|
Add this content to the vault file:
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
vault_tailscale_auth_key: "tskey-auth-your-actual-key-here"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Install Tailscale on all machines**:
|
||||||
|
```bash
|
||||||
|
# Dry run first to check what will happen
|
||||||
|
make tailscale-check
|
||||||
|
|
||||||
|
# Install on all machines
|
||||||
|
make tailscale
|
||||||
|
```
|
||||||
|
|
||||||
|
## Available Commands
|
||||||
|
|
||||||
|
- `make tailscale` - Install Tailscale on all machines
|
||||||
|
- `make tailscale-check` - Dry run to see what changes will be made
|
||||||
|
- `make tailscale-dev` - Install only on dev machines
|
||||||
|
- `make tailscale-status` - Check Tailscale status on all machines
|
||||||
|
|
||||||
|
## Manual Installation Options
|
||||||
|
|
||||||
|
### Install on specific machines:
|
||||||
|
```bash
|
||||||
|
ansible-playbook -i hosts tailscale-playbook.yml --limit "devVM,bottom"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install with custom auth key:
|
||||||
|
```bash
|
||||||
|
ansible-playbook -i hosts tailscale-playbook.yml --extra-vars "tailscale_auth_key=your-key-here"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install as part of existing playbooks:
|
||||||
|
The Tailscale role has been added to both `dev-playbook.yml` and `local-playbook.yml` with the tag `tailscale`.
|
||||||
|
|
||||||
|
Run only Tailscale tasks:
|
||||||
|
```bash
|
||||||
|
ansible-playbook -i hosts dev-playbook.yml --tags tailscale
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration Options
|
||||||
|
|
||||||
|
You can customize Tailscale behavior by setting these variables in `group_vars/all.yml` or `host_vars/hostname.yml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
tailscale_auth_key: "{{ vault_tailscale_auth_key }}" # Auth key from vault
|
||||||
|
tailscale_hostname: "{{ inventory_hostname }}" # Custom hostname
|
||||||
|
tailscale_accept_routes: true # Accept subnet routes
|
||||||
|
tailscale_accept_dns: true # Accept DNS settings
|
||||||
|
tailscale_ssh: true # Enable SSH server
|
||||||
|
tailscale_advertise_routes: "192.168.1.0/24" # Advertise subnets
|
||||||
|
tailscale_shields_up: false # Block incoming connections
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Check if Tailscale is running:
|
||||||
|
```bash
|
||||||
|
make tailscale-status
|
||||||
|
```
|
||||||
|
|
||||||
|
### Manual connection (if auth key wasn't provided):
|
||||||
|
```bash
|
||||||
|
# SSH to the machine and run:
|
||||||
|
sudo tailscale up
|
||||||
|
```
|
||||||
|
|
||||||
|
### Reset connection:
|
||||||
|
```bash
|
||||||
|
ansible-playbook -i hosts tailscale-playbook.yml --extra-vars "tailscale_reset=true"
|
||||||
|
```
|
||||||
|
|
||||||
|
### View logs:
|
||||||
|
```bash
|
||||||
|
# On the target machine:
|
||||||
|
sudo journalctl -u tailscaled -f
|
||||||
|
```
|
||||||
|
|
||||||
|
## Security Notes
|
||||||
|
|
||||||
|
- Store your Tailscale auth key in Ansible Vault for security
|
||||||
|
- Consider using ephemeral auth keys for one-time setups
|
||||||
|
- The role enables SSH by default - disable if not needed
|
||||||
|
- Machines will need to be authorized in your Tailscale admin console
|
||||||
|
|
||||||
|
## Supported Operating Systems
|
||||||
|
|
||||||
|
- Ubuntu (focal, jammy, noble)
|
||||||
|
- Debian (bullseye, bookworm, trixie)
|
||||||
|
- Alpine Linux (all versions)
|
||||||
|
|
||||||
|
The role automatically detects the OS and uses the appropriate package manager (apt for Ubuntu/Debian, apk for Alpine).
|
||||||
76
VAULT_EXPLAINED.md
Normal file
76
VAULT_EXPLAINED.md
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
# How Ansible Vault Works
|
||||||
|
|
||||||
|
## The Problem:
|
||||||
|
You need to store secrets (like Tailscale auth keys) but don't want them visible in your code.
|
||||||
|
|
||||||
|
## The Solution:
|
||||||
|
Ansible Vault encrypts files so secrets are hidden but still usable.
|
||||||
|
|
||||||
|
## Here's how it works:
|
||||||
|
|
||||||
|
### 1. Create the encrypted vault file:
|
||||||
|
```bash
|
||||||
|
make create-vault
|
||||||
|
```
|
||||||
|
This creates `group_vars/all/vault.yml` (encrypted) and asks for a password.
|
||||||
|
|
||||||
|
### 2. Add your secrets to the vault:
|
||||||
|
When the editor opens, add:
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
vault_tailscale_auth_key: "tskey-auth-your-actual-key-here"
|
||||||
|
vault_database_password: "super-secret-password"
|
||||||
|
vault_api_key: "another-secret"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Reference secrets in your code:
|
||||||
|
In any playbook or role, use:
|
||||||
|
```yaml
|
||||||
|
tailscale_auth_key: "{{ vault_tailscale_auth_key }}"
|
||||||
|
```
|
||||||
|
|
||||||
|
## File Structure:
|
||||||
|
```
|
||||||
|
group_vars/
|
||||||
|
├── all.yml # Plain text settings (everyone can see)
|
||||||
|
└── all/
|
||||||
|
└── vault.yml # Encrypted secrets (protected)
|
||||||
|
```
|
||||||
|
|
||||||
|
## How Ansible finds the auth key:
|
||||||
|
|
||||||
|
1. **Playbook runs** → looks for `tailscale_auth_key` variable
|
||||||
|
2. **Checks `all.yml`** → finds reference to `{{ vault_tailscale_auth_key }}`
|
||||||
|
3. **Checks `all/vault.yml`** → finds the encrypted auth key
|
||||||
|
4. **Decrypts and uses it** → connects to Tailscale
|
||||||
|
|
||||||
|
## Commands:
|
||||||
|
```bash
|
||||||
|
# Create new vault
|
||||||
|
make create-vault
|
||||||
|
|
||||||
|
# Edit existing vault
|
||||||
|
ansible-vault edit group_vars/all/vault.yml
|
||||||
|
|
||||||
|
# View vault contents (decrypted)
|
||||||
|
ansible-vault view group_vars/all/vault.yml
|
||||||
|
|
||||||
|
# Run playbooks (will ask for vault password)
|
||||||
|
make tailscale
|
||||||
|
# OR provide password file
|
||||||
|
ansible-playbook -i hosts tailscale-playbook.yml --vault-password-file ~/.vault_pass
|
||||||
|
```
|
||||||
|
|
||||||
|
## No code changes needed!
|
||||||
|
The playbook already looks for `vault_tailscale_auth_key` - just put your real key in the vault and it works automatically.
|
||||||
|
|
||||||
|
## What's NOT in the vault:
|
||||||
|
- Regular settings (in `all.yml`)
|
||||||
|
- Non-sensitive configuration
|
||||||
|
- Public information
|
||||||
|
|
||||||
|
## What IS in the vault:
|
||||||
|
- Auth keys
|
||||||
|
- Passwords
|
||||||
|
- Private keys
|
||||||
|
- Any sensitive data
|
||||||
@ -12,6 +12,7 @@
|
|||||||
- { role: docker, tags: ['docker'] }
|
- { role: docker, tags: ['docker'] }
|
||||||
- { role: applications, tags: ['applications', 'apps'] }
|
- { role: applications, tags: ['applications', 'apps'] }
|
||||||
- { role: snap, tags: ['snap', 'apps'] }
|
- { role: snap, tags: ['snap', 'apps'] }
|
||||||
|
- { role: tailscale, tags: ['tailscale', 'vpn'] }
|
||||||
|
|
||||||
pre_tasks:
|
pre_tasks:
|
||||||
- name: Update apt cache
|
- name: Update apt cache
|
||||||
|
|||||||
@ -16,3 +16,17 @@ fail2ban_maxretry: 3
|
|||||||
maintenance_default_serial: "100%" # Default serial execution for maintenance
|
maintenance_default_serial: "100%" # Default serial execution for maintenance
|
||||||
maintenance_reboot_timeout: 300 # Reboot timeout in seconds
|
maintenance_reboot_timeout: 300 # Reboot timeout in seconds
|
||||||
maintenance_pre_reboot_delay: 5 # Delay before reboot in seconds
|
maintenance_pre_reboot_delay: 5 # Delay before reboot in seconds
|
||||||
|
|
||||||
|
# Global variables for all hosts
|
||||||
|
|
||||||
|
# Tailscale configuration
|
||||||
|
# Store your actual auth key in vault_tailscale_auth_key using ansible-vault
|
||||||
|
# Example: ansible-vault create group_vars/all/vault.yml
|
||||||
|
# vault_tailscale_auth_key: "tskey-auth-your-actual-key-here"
|
||||||
|
|
||||||
|
# Default Tailscale settings - these tell the playbook to use your vault key
|
||||||
|
tailscale_auth_key: "{{ vault_tailscale_auth_key | default('') }}"
|
||||||
|
tailscale_accept_routes: true
|
||||||
|
tailscale_accept_dns: true
|
||||||
|
tailscale_ssh: true
|
||||||
|
tailscale_hostname: "{{ inventory_hostname }}"
|
||||||
6
hosts
6
hosts
@ -7,6 +7,9 @@ portainerVM ansible_host=10.0.30.69 ansible_user=ladmin
|
|||||||
[homepage]
|
[homepage]
|
||||||
homepageVM ansible_host=10.0.30.12 ansible_user=homepage
|
homepageVM ansible_host=10.0.30.12 ansible_user=homepage
|
||||||
|
|
||||||
|
[vaultwarden]
|
||||||
|
vaultwardenVM ansible_host=100.100.19.11 ansible_user=ladmin
|
||||||
|
|
||||||
[dev]
|
[dev]
|
||||||
dev01 ansible_host=10.0.30.105 ansible_user=ladmin
|
dev01 ansible_host=10.0.30.105 ansible_user=ladmin
|
||||||
bottom ansible_host=10.0.10.156 ansible_user=beast
|
bottom ansible_host=10.0.10.156 ansible_user=beast
|
||||||
@ -15,5 +18,8 @@ debianDesktopVM ansible_host=10.0.10.206 ansible_user=user skip_reboot=true
|
|||||||
[ansible]
|
[ansible]
|
||||||
ansible-controlVM ansible_host=10.0.10.157 ansible_user=master
|
ansible-controlVM ansible_host=10.0.10.157 ansible_user=master
|
||||||
|
|
||||||
|
[tailscale]
|
||||||
|
tailscaleVM ansible_host=100.66.218.53 ansible_user=ladmin
|
||||||
|
|
||||||
[local]
|
[local]
|
||||||
localhost ansible_connection=local
|
localhost ansible_connection=local
|
||||||
|
|||||||
@ -13,6 +13,7 @@
|
|||||||
- { role: docker, tags: ['docker'] }
|
- { role: docker, tags: ['docker'] }
|
||||||
- { role: applications, tags: ['applications', 'apps'] }
|
- { role: applications, tags: ['applications', 'apps'] }
|
||||||
- { role: snap, tags: ['snap', 'apps'] }
|
- { role: snap, tags: ['snap', 'apps'] }
|
||||||
|
- { role: tailscale, tags: ['tailscale', 'vpn'] }
|
||||||
|
|
||||||
pre_tasks:
|
pre_tasks:
|
||||||
- name: Update apt cache
|
- name: Update apt cache
|
||||||
|
|||||||
20
roles/tailscale/defaults/main.yml
Normal file
20
roles/tailscale/defaults/main.yml
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
# Default variables for Tailscale role
|
||||||
|
|
||||||
|
# REQUIRED: Your Tailscale auth key (store in vault for security)
|
||||||
|
tailscale_auth_key: ""
|
||||||
|
|
||||||
|
# COMMON OPTIONS:
|
||||||
|
tailscale_hostname: "" # Custom hostname (default: uses inventory name)
|
||||||
|
tailscale_ssh: true # Enable SSH access through Tailscale
|
||||||
|
tailscale_accept_routes: true # Accept subnet routes from other nodes
|
||||||
|
tailscale_accept_dns: true # Accept DNS settings from Tailscale
|
||||||
|
|
||||||
|
# ADVANCED OPTIONS (usually not needed):
|
||||||
|
tailscale_advertise_routes: "" # Advertise subnets (e.g., "192.168.1.0/24")
|
||||||
|
tailscale_shields_up: false # Block all incoming connections
|
||||||
|
tailscale_reset: false # Force reconnection (will logout first)
|
||||||
|
|
||||||
|
# ENTERPRISE OPTIONS (leave empty for personal use):
|
||||||
|
tailscale_login_server: "" # Custom control server URL
|
||||||
|
tailscale_operator: "" # Operator user for Tailscale
|
||||||
13
roles/tailscale/handlers/main.yml
Normal file
13
roles/tailscale/handlers/main.yml
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
---
|
||||||
|
- name: start tailscaled
|
||||||
|
ansible.builtin.systemd:
|
||||||
|
name: tailscaled
|
||||||
|
state: started
|
||||||
|
enabled: true
|
||||||
|
become: true
|
||||||
|
|
||||||
|
- name: restart tailscaled
|
||||||
|
ansible.builtin.systemd:
|
||||||
|
name: tailscaled
|
||||||
|
state: restarted
|
||||||
|
become: true
|
||||||
41
roles/tailscale/tasks/alpine.yml
Normal file
41
roles/tailscale/tasks/alpine.yml
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
- name: Install required packages for Alpine
|
||||||
|
ansible.builtin.apk:
|
||||||
|
name:
|
||||||
|
- curl
|
||||||
|
- gnupg
|
||||||
|
state: present
|
||||||
|
become: true
|
||||||
|
|
||||||
|
- name: Add Tailscale repository key (Alpine)
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: https://pkgs.tailscale.com/stable/alpine/tailscale.rsa.pub
|
||||||
|
dest: /etc/apk/keys/tailscale.rsa.pub
|
||||||
|
mode: '0644'
|
||||||
|
become: true
|
||||||
|
|
||||||
|
- name: Add Tailscale repository (Alpine)
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /etc/apk/repositories
|
||||||
|
line: "https://pkgs.tailscale.com/stable/alpine/any-version/main"
|
||||||
|
state: present
|
||||||
|
become: true
|
||||||
|
|
||||||
|
- name: Update apk cache
|
||||||
|
ansible.builtin.apk:
|
||||||
|
update_cache: true
|
||||||
|
become: true
|
||||||
|
|
||||||
|
- name: Install Tailscale
|
||||||
|
ansible.builtin.apk:
|
||||||
|
name: tailscale
|
||||||
|
state: present
|
||||||
|
become: true
|
||||||
|
notify: start tailscaled
|
||||||
|
|
||||||
|
- name: Enable and start Tailscale daemon
|
||||||
|
ansible.builtin.service:
|
||||||
|
name: tailscaled
|
||||||
|
enabled: true
|
||||||
|
state: started
|
||||||
|
become: true
|
||||||
37
roles/tailscale/tasks/debian.yml
Normal file
37
roles/tailscale/tasks/debian.yml
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
---
# Install Tailscale on Debian/Ubuntu from the official Tailscale apt
# repository. Included from tasks/main.yml when ansible_os_family is
# "Debian" (which covers Ubuntu as well).

- name: Determine repository distro and codename
  # Resolve these first so both the keyring URL and the repo line stay
  # consistent with each other.
  # NOTE(review): hard-coded codenames assume Ubuntu 22.04 (jammy) /
  # Debian 12 (bookworm) — confirm against the fleet, or derive from
  # ansible_distribution_release.
  ansible.builtin.set_fact:
    tailscale_repo_distro: "{{ 'ubuntu' if ansible_distribution == 'Ubuntu' else 'debian' }}"
    tailscale_repo_codename: "{{ 'jammy' if ansible_distribution == 'Ubuntu' else 'bookworm' }}"

- name: Add Tailscale GPG key
  # Fetch the keyring matching the target distro/codename instead of
  # unconditionally using the Ubuntu jammy key.
  ansible.builtin.get_url:
    url: "https://pkgs.tailscale.com/stable/{{ tailscale_repo_distro }}/{{ tailscale_repo_codename }}.noarmor.gpg"
    dest: /usr/share/keyrings/tailscale-archive-keyring.gpg
    mode: '0644'
  become: true

- name: Add Tailscale repository
  ansible.builtin.apt_repository:
    repo: "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/{{ tailscale_repo_distro }} {{ tailscale_repo_codename }} main"
    state: present
    filename: tailscale
  become: true

- name: Install Tailscale
  ansible.builtin.apt:
    name: tailscale
    state: present
    # Refresh indices in the same task so the repository added above is
    # visible; replaces the previous separate always-run apt update task.
    update_cache: true
  become: true
  notify: start tailscaled

- name: Enable and start Tailscale daemon
  ansible.builtin.systemd:
    name: tailscaled
    enabled: true
    state: started
  become: true
|
||||||
50
roles/tailscale/tasks/main.yml
Normal file
50
roles/tailscale/tasks/main.yml
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
---
# Entry point for the tailscale role: validate the OS, install via the
# OS-specific task file, then bring the node onto the tailnet.

- name: Fail if not running on supported OS
  # Guard BEFORE the dynamic include: on an unsupported OS the include
  # below would otherwise abort first with a confusing "file not found"
  # instead of this explicit message.
  ansible.builtin.fail:
    msg: "This role only supports Ubuntu, Debian, and Alpine systems"
  when: ansible_os_family not in ["Debian", "Alpine"]

- name: Include OS-specific installation tasks
  # Resolves to debian.yml or alpine.yml (ansible_os_family is "Debian"
  # on Ubuntu too).
  ansible.builtin.include_tasks: "{{ ansible_os_family | lower }}.yml"

- name: Check if Tailscale is already connected
  ansible.builtin.command: tailscale status --json
  register: tailscale_status
  failed_when: false   # non-zero rc simply means "not connected yet"
  changed_when: false

- name: Parse Tailscale status
  # BackendState == "Running" means the node is up on the tailnet.
  ansible.builtin.set_fact:
    tailscale_connected: "{{ (tailscale_status.stdout | from_json).BackendState == 'Running' if tailscale_status.rc == 0 else false }}"

- name: Reset Tailscale if requested
  # Only log out when we are actually connected and a reset was asked for.
  ansible.builtin.command: tailscale logout
  when:
    - tailscale_reset | bool
    - tailscale_connected | bool
  notify: restart tailscaled

- name: Connect to Tailscale network
  # Each optional flag collapses to an empty string when its variable is
  # unset/empty (defaults live in defaults/main.yml).
  ansible.builtin.command: >
    tailscale up
    {{ '--auth-key=' + tailscale_auth_key if tailscale_auth_key else '' }}
    {{ '--hostname=' + tailscale_hostname if tailscale_hostname else '' }}
    {{ '--advertise-routes=' + tailscale_advertise_routes if tailscale_advertise_routes else '' }}
    {{ '--accept-routes' if tailscale_accept_routes else '--accept-routes=false' }}
    {{ '--accept-dns' if tailscale_accept_dns else '--accept-dns=false' }}
    {{ '--shields-up' if tailscale_shields_up else '' }}
    {{ '--login-server=' + tailscale_login_server if tailscale_login_server else '' }}
    {{ '--operator=' + tailscale_operator if tailscale_operator else '' }}
    {{ '--ssh' if tailscale_ssh else '' }}
  when: not tailscale_connected or tailscale_reset
  register: tailscale_up_result
  changed_when: tailscale_up_result.rc == 0

- name: Display Tailscale status
  ansible.builtin.command: tailscale status
  register: final_status
  changed_when: false

- name: Show Tailscale connection status
  ansible.builtin.debug:
    msg: "{{ final_status.stdout_lines }}"
|
||||||
31
tailscale-playbook.yml
Normal file
31
tailscale-playbook.yml
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
---
# Roll Tailscale out to every inventory host.
# All tunables (auth key, hostname, routes, ...) live in group_vars/all.yml;
# override ad hoc with: --extra-vars "tailscale_auth_key=your_key"

- name: Install and configure Tailscale on all machines
  hosts: all
  become: true
  gather_facts: true

  pre_tasks:
    - name: Update package cache (Debian/Ubuntu)
      ansible.builtin.apt:
        update_cache: true
      when: ansible_os_family == "Debian"

  roles:
    - role: tailscale
      tags:
        - tailscale
        - vpn

  post_tasks:
    - name: Display Tailscale installation completion
      ansible.builtin.debug:
        msg: |
          Tailscale has been installed and configured on {{ inventory_hostname }}.

          To connect this machine to your Tailscale network:
          1. If you provided an auth key, the machine should already be connected
          2. If no auth key was provided, run: sudo tailscale up
          3. Check status with: tailscale status

          Remember to authorize the machine in your Tailscale admin console if needed.
|
||||||
Loading…
x
Reference in New Issue
Block a user