added support for Windows / Active Directory with Nebula compatibility

This commit is contained in:
Damien Coles 2026-02-10 09:24:01 -05:00
parent 0c77379ab3
commit 6127347b07
6 changed files with 125 additions and 20 deletions


@@ -6,9 +6,10 @@ Production-grade infrastructure-as-code for running services on Proxmox with ent
Arvandor provides a complete infrastructure stack:
- **Terraform** - VM provisioning on Proxmox (Linux and Windows)
- **Ansible** - Configuration management
- **Nebula** - Encrypted overlay network
- **Active Directory** - Windows domain services (hybrid on-prem/cloud)
- **Vault** - Secrets management (3-node Raft cluster)
- **PostgreSQL** - Database (3-node Patroni + etcd)
- **Valkey** - Cache/queue (3-node Sentinel)
@@ -26,7 +27,7 @@ Arvandor provides a complete infrastructure stack:
│ │             │ │             │ │             │ │             │ │
│ │ DNS, Caddy  │ │ Vault       │ │ PostgreSQL  │ │ Your Apps   │ │
│ │ Lighthouse  │ │ Gitea       │ │ Valkey      │ │             │ │
│ │ AD (DC, CA) │ │             │ │ Garage      │ │             │ │
│ └──────┬──────┘ └──────┬──────┘ └──────┬──────┘ └──────┬──────┘ │
│        │               │               │               │        │
│        └───────────────┴───────────────┴───────────────┘        │
@@ -41,6 +42,7 @@ Arvandor provides a complete infrastructure stack:
- Proxmox VE host
- Arch Linux VM template (VMID 9000)
- Windows Server 2025 sysprepped VM template (VMID 10000, optional)
- Terraform, Ansible installed locally
- Nebula binary for certificate generation
@@ -96,13 +98,37 @@ ansible-playbook -i inventory.ini playbooks/valkey-sentinel.yml
ansible-playbook -i inventory.ini playbooks/garage.yml
```
### 5. Windows AD Infrastructure (Optional)
DC01 is provisioned manually, since the first domain controller bootstraps the forest (a sketch of those manual steps follows the block below). CA01 and RDS01 are provisioned via Terraform from a sysprepped Windows Server template.
```bash
# Create CA and RDS VMs from Windows template
cd terraform
terraform apply
# After OOBE, on each Windows VM:
# - Set static IP and DNS (point to DC01)
# - Join domain: Add-Computer -DomainName "yourdomain.internal" -Restart
# - Install roles:
# CA01: Install-WindowsFeature AD-Certificate-Services -IncludeManagementTools
# RDS01: Install-WindowsFeature RDS-RD-Server,FS-FileServer -IncludeManagementTools
```
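The DC01 forest bootstrap itself isn't scripted in this repo. As a rough sketch of the manual step (run on DC01 after OOBE and static IP/DNS setup; the domain name is the same placeholder used above):
```powershell
# Sketch only, not part of this commit; adjust the domain name to your environment.
Install-WindowsFeature AD-Domain-Services -IncludeManagementTools

# Promote to the first DC of a new forest: installs DNS, prompts for the
# DSRM (safe mode) password, and reboots when finished.
Install-ADDSForest -DomainName "yourdomain.internal" -InstallDns `
    -SafeModeAdministratorPassword (Read-Host -AsSecureString "DSRM password")
```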
Nebula runs on Windows as a service, providing the same encrypted overlay connectivity as Linux VMs. Install the Windows Nebula binary, sign a cert, and register as a service:
```powershell
nebula.exe -service install -config C:\nebula\config.yml
Start-Service nebula
```
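The host certificate is signed wherever the Nebula CA key lives, not on the Windows VM itself. A sketch for dc01, assuming the CA files sit in the working directory and a /24 overlay (the IP matches the DNS records added in this commit):
```bash
# Assumes ./ca.crt and ./ca.key; adjust -name/-ip for each Windows host.
nebula-cert sign -name "dc01" -ip "10.10.10.13/24" \
  -out-crt dc01.crt -out-key dc01.key
# Copy dc01.crt, dc01.key, ca.crt, and config.yml to C:\nebula\ on the VM.
```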
## Directory Structure
```
arvandor/
├── terraform/              # VM provisioning (Linux + Windows)
│   ├── modules/vm/         # Reusable VM module (os_type: linux/windows)
│   ├── management.tf       # DNS, Caddy, AD (DC, CA, RDS)
│   ├── services.tf         # Vault, Gitea
│   └── data.tf             # PostgreSQL, Valkey, Garage
├── ansible/                # Configuration management
@@ -135,6 +161,18 @@ VMs only accept traffic from the Proxmox host (for Ansible) and the Nebula overl
| `projects` | Application workloads |
| `games` | Isolated game servers |
## Windows AD Integration
The VM module supports both Linux and Windows VMs via the `os_type` variable. Windows VMs use UEFI (OVMF), the q35 machine type, and skip cloud-init initialization; a usage sketch follows the table below.
| VM | VMID | Role | Provisioning |
|----|------|------|-------------|
| dc01 | 1003 | Domain Controller | Manual (forest bootstrap) |
| ca01 | 1005 | Certificate Authority | Terraform + manual role install |
| rds01 | 1006 | Remote Desktop + File Server | Terraform + manual role install |
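Concretely, a Windows VM is a single module call with `os_type` set; this mirrors the `ca01` block added to terraform/management.tf in this commit:
```hcl
module "ca01" {
  source       = "./modules/vm"
  name         = "ca01"
  vmid         = 1005
  os_type      = "windows"                 # selects OVMF/q35, skips cloud-init
  clone_vmid   = var.windows_template_vmid # sysprepped template (VMID 10000)
  node_name    = var.proxmox_node
  bridge_ip    = "192.168.100.15"
  datastore_id = var.datastore_id
}
```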
**Design:** DC01 is the only manually provisioned Windows VM (same pattern as the Nebula lighthouse). It bootstraps the AD forest, after which all other Windows VMs can be domain-joined. Nebula provides encrypted connectivity for AD traffic (Kerberos, LDAP, DNS) without exposing ports to the internet.
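On the Nebula side, that means the DC's inbound firewall only needs the AD service ports opened to overlay peers. An illustrative config.yml fragment (not shipped in this commit; the exact port set and the wide-open host scoping are assumptions to tighten for your environment):
```yaml
firewall:
  inbound:
    - port: 53     # DNS
      proto: any
      host: any
    - port: 88     # Kerberos
      proto: any
      host: any
    - port: 389    # LDAP
      proto: any
      host: any
    - port: 445    # SMB (SYSVOL/NETLOGON)
      proto: tcp
      host: any
```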
## Documentation
- [Getting Started](docs/getting-started.md) - Detailed setup guide


@@ -14,6 +14,14 @@ local-data-ptr: "{{ lighthouse_nebula_ip }} lighthouse.nebula"
local-data: "proxmox.nebula. IN A 10.10.10.1"
local-data-ptr: "10.10.10.1 proxmox.nebula"
# Windows AD VMs (not in Ansible inventory, manually provisioned)
local-data: "dc01.nebula. IN A 10.10.10.13"
local-data-ptr: "10.10.10.13 dc01.nebula"
local-data: "ca01.nebula. IN A 10.10.10.15"
local-data-ptr: "10.10.10.15 ca01.nebula"
local-data: "rds01.nebula. IN A 10.10.10.16"
local-data-ptr: "10.10.10.16 rds01.nebula"
# All VMs from inventory
{% for host in groups['all'] %}
local-data: "{{ host }}.nebula. IN A {{ hostvars[host]['nebula_ip'] }}"


@@ -7,6 +7,9 @@
# 1000 lighthouse 192.168.100.10 - Nebula lighthouse/relay
# 1001 dns 192.168.100.11 - Internal DNS server
# 1002 caddy 192.168.100.12 - Reverse proxy
# 1003 dc01 192.168.100.13 - AD domain controller (manual)
# 1005 ca01 192.168.100.15 - AD Certificate Authority
# 1006 rds01 192.168.100.16 - Remote Desktop Services + File Server
module "dns" { module "dns" {
source = "./modules/vm" source = "./modules/vm"
@ -35,3 +38,31 @@ module "caddy" {
password = var.password password = var.password
ssh_key_path = var.ssh_key_path ssh_key_path = var.ssh_key_path
} }
module "ca01" {
source = "./modules/vm"
name = "ca01"
vmid = 1005
node_name = var.proxmox_node
bridge_ip = "192.168.100.15"
os_type = "windows"
datastore_id = var.datastore_id
clone_vmid = var.windows_template_vmid
cores = 2
memory = 4096
disk_size = 60
}
module "rds01" {
source = "./modules/vm"
name = "rds01"
vmid = 1006
node_name = var.proxmox_node
bridge_ip = "192.168.100.16"
os_type = "windows"
datastore_id = var.datastore_id
clone_vmid = var.windows_template_vmid
cores = 4
memory = 8192
disk_size = 100
}


@@ -11,6 +11,9 @@ resource "proxmox_virtual_environment_vm" "vm" {
  node_name = var.node_name
  vm_id     = var.vmid

  machine = var.os_type == "windows" ? "q35" : null
  bios    = var.os_type == "windows" ? "ovmf" : null

  clone {
    vm_id = var.clone_vmid
  }
@@ -32,11 +35,20 @@ resource "proxmox_virtual_environment_vm" "vm" {
    size = var.disk_size
  }

  dynamic "efi_disk" {
    for_each = var.os_type == "windows" ? [1] : []
    content {
      datastore_id = var.datastore_id
    }
  }

  network_device {
    bridge = var.network_bridge
  }

  dynamic "initialization" {
    for_each = var.os_type == "linux" ? [1] : []
    content {
      datastore_id = var.datastore_id
      ip_config {
        ipv4 {
@@ -51,6 +63,7 @@ resource "proxmox_virtual_environment_vm" "vm" {
        }
      }
    }
  }
}
# Firewall configuration - always manage options to explicitly enable/disable
resource "proxmox_virtual_environment_firewall_options" "vm" {


@@ -3,6 +3,12 @@ variable "name" {
  description = "VM name"
}

variable "os_type" {
  type        = string
  default     = "linux"
  description = "OS type: linux or windows"
}

variable "vmid" {
  type        = number
  description = "Proxmox VM ID"
@@ -68,18 +74,21 @@ variable "clone_vmid" {
variable "username" {
  type        = string
  default     = null
  description = "VM user account name (Linux only, cloud-init)"
}

variable "password" {
  type        = string
  default     = null
  sensitive   = true
  description = "VM user account password (Linux only, cloud-init)"
}

variable "ssh_key_path" {
  type        = string
  default     = null
  description = "Path to SSH public key file (Linux only, cloud-init)"
}

variable "firewall_enabled" {


@@ -70,5 +70,11 @@ variable "gateway" {
variable "template_vmid" {
  type        = number
  default     = 9000
  description = "Template VM ID to clone from (Linux)"
}

variable "windows_template_vmid" {
  type        = number
  default     = 10000
  description = "Template VM ID to clone from (Windows)"
}