policy/v2: overhaul compat test infrastructure

Reworks the ACL/routes/grant/SSH compat harnesses to read
testcapture.Capture typed files, per-scenario topologies, strict error
wording match, and shared helpers. Surfaces policy-engine drift against
Tailscale SaaS.

Updates #3157
This commit is contained in:
Kristoffer Dalby
2026-04-15 08:32:10 +00:00
parent f34dec2754
commit a7c9721faa
5 changed files with 2335 additions and 712 deletions

View File

@@ -1 +1,431 @@
package matcher
import (
"net/netip"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
)
// TestMatchFromStrings verifies that a Match built from raw strings
// (CIDRs, single IPs, and the "*" wildcard) classifies probe addresses
// as inside or outside its source and destination sets.
func TestMatchFromStrings(t *testing.T) {
	t.Parallel()

	cases := []struct {
		label     string
		sources   []string
		dests     []string
		probeSrc  netip.Addr
		probeDst  netip.Addr
		expectSrc bool
		expectDst bool
	}{
		{
			label:     "basic CIDR match",
			sources:   []string{"10.0.0.0/8"},
			dests:     []string{"192.168.1.0/24"},
			probeSrc:  netip.MustParseAddr("10.1.2.3"),
			probeDst:  netip.MustParseAddr("192.168.1.100"),
			expectSrc: true,
			expectDst: true,
		},
		{
			label:     "basic CIDR no match",
			sources:   []string{"10.0.0.0/8"},
			dests:     []string{"192.168.1.0/24"},
			probeSrc:  netip.MustParseAddr("172.16.0.1"),
			probeDst:  netip.MustParseAddr("10.0.0.1"),
			expectSrc: false,
			expectDst: false,
		},
		{
			label:     "wildcard matches everything",
			sources:   []string{"*"},
			dests:     []string{"*"},
			probeSrc:  netip.MustParseAddr("8.8.8.8"),
			probeDst:  netip.MustParseAddr("1.1.1.1"),
			expectSrc: true,
			expectDst: true,
		},
		{
			label:     "wildcard matches IPv6",
			sources:   []string{"*"},
			dests:     []string{"*"},
			probeSrc:  netip.MustParseAddr("2001:db8::1"),
			probeDst:  netip.MustParseAddr("fd7a:115c:a1e0::1"),
			expectSrc: true,
			expectDst: true,
		},
		{
			label:     "single IP source",
			sources:   []string{"100.64.0.1"},
			dests:     []string{"10.0.0.0/8"},
			probeSrc:  netip.MustParseAddr("100.64.0.1"),
			probeDst:  netip.MustParseAddr("10.33.0.1"),
			expectSrc: true,
			expectDst: true,
		},
		{
			label:     "single IP source no match",
			sources:   []string{"100.64.0.1"},
			dests:     []string{"10.0.0.0/8"},
			probeSrc:  netip.MustParseAddr("100.64.0.2"),
			probeDst:  netip.MustParseAddr("10.33.0.1"),
			expectSrc: false,
			expectDst: true,
		},
		{
			label:     "multiple CIDRs",
			sources:   []string{"10.0.0.0/8", "172.16.0.0/12"},
			dests:     []string{"192.168.0.0/16", "100.64.0.0/10"},
			probeSrc:  netip.MustParseAddr("172.20.0.1"),
			probeDst:  netip.MustParseAddr("100.100.0.1"),
			expectSrc: true,
			expectDst: true,
		},
		{
			label:     "IPv6 CIDR",
			sources:   []string{"fd7a:115c:a1e0::/48"},
			dests:     []string{"2001:db8::/32"},
			probeSrc:  netip.MustParseAddr("fd7a:115c:a1e0::1"),
			probeDst:  netip.MustParseAddr("2001:db8::1"),
			expectSrc: true,
			expectDst: true,
		},
		{
			label:     "empty sources and destinations",
			sources:   []string{},
			dests:     []string{},
			probeSrc:  netip.MustParseAddr("10.0.0.1"),
			probeDst:  netip.MustParseAddr("10.0.0.1"),
			expectSrc: false,
			expectDst: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			t.Parallel()

			match := MatchFromStrings(tc.sources, tc.dests)

			assert.Equal(t, tc.expectSrc, match.SrcsContainsIPs(tc.probeSrc),
				"SrcsContainsIPs(%s)", tc.probeSrc)
			assert.Equal(t, tc.expectDst, match.DestsContainsIP(tc.probeDst),
				"DestsContainsIP(%s)", tc.probeDst)
		})
	}
}
// TestMatchFromFilterRule verifies that a Match derived from a
// tailcfg.FilterRule honors SrcIPs and the IPs inside DstPorts,
// including wildcard destinations and empty DstPorts.
func TestMatchFromFilterRule(t *testing.T) {
	t.Parallel()

	cases := []struct {
		label    string
		rule     tailcfg.FilterRule
		srcProbe netip.Addr
		dstProbe netip.Addr
		wantSrc  bool
		wantDst  bool
	}{
		{
			label: "standard rule with port range",
			rule: tailcfg.FilterRule{
				SrcIPs: []string{"100.64.0.1", "fd7a:115c:a1e0::1"},
				DstPorts: []tailcfg.NetPortRange{
					{IP: "10.33.0.0/16", Ports: tailcfg.PortRange{First: 0, Last: 65535}},
				},
			},
			srcProbe: netip.MustParseAddr("100.64.0.1"),
			dstProbe: netip.MustParseAddr("10.33.0.50"),
			wantSrc:  true,
			wantDst:  true,
		},
		{
			label: "wildcard destination",
			rule: tailcfg.FilterRule{
				SrcIPs: []string{"10.0.0.0/8"},
				DstPorts: []tailcfg.NetPortRange{
					{IP: "*"},
				},
			},
			srcProbe: netip.MustParseAddr("10.1.1.1"),
			dstProbe: netip.MustParseAddr("8.8.8.8"),
			wantSrc:  true,
			wantDst:  true,
		},
		{
			label: "multiple DstPorts entries",
			rule: tailcfg.FilterRule{
				SrcIPs: []string{"100.64.0.1"},
				DstPorts: []tailcfg.NetPortRange{
					{IP: "10.33.0.0/16"},
					{IP: "192.168.1.0/24"},
				},
			},
			srcProbe: netip.MustParseAddr("100.64.0.1"),
			dstProbe: netip.MustParseAddr("192.168.1.50"),
			wantSrc:  true,
			wantDst:  true,
		},
		{
			label: "empty DstPorts",
			rule: tailcfg.FilterRule{
				SrcIPs:   []string{"100.64.0.1"},
				DstPorts: nil,
			},
			srcProbe: netip.MustParseAddr("100.64.0.1"),
			dstProbe: netip.MustParseAddr("10.0.0.1"),
			wantSrc:  true,
			wantDst:  false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			t.Parallel()

			match := MatchFromFilterRule(tc.rule)

			assert.Equal(t, tc.wantSrc, match.SrcsContainsIPs(tc.srcProbe),
				"SrcsContainsIPs(%s)", tc.srcProbe)
			assert.Equal(t, tc.wantDst, match.DestsContainsIP(tc.dstProbe),
				"DestsContainsIP(%s)", tc.dstProbe)
		})
	}
}
// TestMatchesFromFilterRules verifies that converting a slice of filter
// rules yields one Match per rule, in order, each mirroring its rule's
// source and destination sets.
func TestMatchesFromFilterRules(t *testing.T) {
	t.Parallel()

	rules := []tailcfg.FilterRule{
		{
			SrcIPs:   []string{"10.0.0.0/8"},
			DstPorts: []tailcfg.NetPortRange{{IP: "192.168.1.0/24"}},
		},
		{
			SrcIPs:   []string{"172.16.0.0/12"},
			DstPorts: []tailcfg.NetPortRange{{IP: "10.33.0.0/16"}},
		},
	}

	matches := MatchesFromFilterRules(rules)
	require.Len(t, matches, 2)

	first, second := matches[0], matches[1]

	// Matcher 0 mirrors rule 0: 10.0.0.0/8 -> 192.168.1.0/24.
	assert.True(t, first.SrcsContainsIPs(netip.MustParseAddr("10.1.2.3")))
	assert.False(t, first.SrcsContainsIPs(netip.MustParseAddr("172.16.0.1")))
	assert.True(t, first.DestsContainsIP(netip.MustParseAddr("192.168.1.100")))

	// Matcher 1 mirrors rule 1: 172.16.0.0/12 -> 10.33.0.0/16.
	assert.True(t, second.SrcsContainsIPs(netip.MustParseAddr("172.20.0.1")))
	assert.True(t, second.DestsContainsIP(netip.MustParseAddr("10.33.0.1")))
	assert.False(t, second.DestsContainsIP(netip.MustParseAddr("192.168.1.1")))
}
// TestSrcsOverlapsPrefixes verifies SrcsOverlapsPrefixes: overlap is
// reported when any given prefix intersects the source set in either
// direction (parent-of or child-of), for both IPv4 and IPv6.
func TestSrcsOverlapsPrefixes(t *testing.T) {
	t.Parallel()

	cases := []struct {
		label   string
		sources []string
		overlap []netip.Prefix
		expect  bool
	}{
		{
			label:   "exact match",
			sources: []string{"10.33.0.0/16"},
			overlap: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
			expect:  true,
		},
		{
			label:   "parent contains child",
			sources: []string{"10.0.0.0/8"},
			overlap: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
			expect:  true,
		},
		{
			label:   "child overlaps parent",
			sources: []string{"10.33.0.0/16"},
			overlap: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")},
			expect:  true,
		},
		{
			label:   "no overlap",
			sources: []string{"10.0.0.0/8"},
			overlap: []netip.Prefix{netip.MustParsePrefix("192.168.1.0/24")},
			expect:  false,
		},
		{
			label:   "multiple prefixes one overlaps",
			sources: []string{"10.0.0.0/8"},
			overlap: []netip.Prefix{
				netip.MustParsePrefix("192.168.1.0/24"),
				netip.MustParsePrefix("10.33.0.0/16"),
			},
			expect: true,
		},
		{
			label:   "IPv6 overlap",
			sources: []string{"fd7a:115c:a1e0::/48"},
			overlap: []netip.Prefix{netip.MustParsePrefix("fd7a:115c:a1e0:ab12::/64")},
			expect:  true,
		},
		{
			label:   "empty prefixes",
			sources: []string{"10.0.0.0/8"},
			overlap: []netip.Prefix{},
			expect:  false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			t.Parallel()

			match := MatchFromStrings(tc.sources, nil)
			assert.Equal(t, tc.expect, match.SrcsOverlapsPrefixes(tc.overlap...))
		})
	}
}
// TestDestsOverlapsPrefixes verifies DestsOverlapsPrefixes: overlap is
// reported when any given prefix intersects the destination set,
// including the "*" wildcard against the v4 and v6 default routes.
func TestDestsOverlapsPrefixes(t *testing.T) {
	t.Parallel()

	cases := []struct {
		label   string
		dests   []string
		overlap []netip.Prefix
		expect  bool
	}{
		{
			label:   "exact match",
			dests:   []string{"10.33.0.0/16"},
			overlap: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
			expect:  true,
		},
		{
			label:   "parent contains child",
			dests:   []string{"10.0.0.0/8"},
			overlap: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
			expect:  true,
		},
		{
			label:   "no overlap",
			dests:   []string{"10.0.0.0/8"},
			overlap: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/16")},
			expect:  false,
		},
		{
			label: "wildcard overlaps everything",
			dests: []string{"*"},
			overlap: []netip.Prefix{
				netip.MustParsePrefix("0.0.0.0/0"),
			},
			expect: true,
		},
		{
			label:   "wildcard overlaps exit route",
			dests:   []string{"*"},
			overlap: []netip.Prefix{netip.MustParsePrefix("::/0")},
			expect:  true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			t.Parallel()

			match := MatchFromStrings(nil, tc.dests)
			assert.Equal(t, tc.expect, match.DestsOverlapsPrefixes(tc.overlap...))
		})
	}
}
// TestDestsIsTheInternet verifies DestsIsTheInternet: only the full
// IPv4 default route, the full IPv6 default route, or the "*" wildcard
// count as "the internet"; private/CGNAT ranges and single hosts do not.
func TestDestsIsTheInternet(t *testing.T) {
	t.Parallel()

	cases := []struct {
		label  string
		dests  []string
		expect bool
	}{
		{
			label:  "all IPv4 is the internet",
			dests:  []string{"0.0.0.0/0"},
			expect: true,
		},
		{
			label:  "all IPv6 is the internet",
			dests:  []string{"::/0"},
			expect: true,
		},
		{
			label:  "wildcard is the internet",
			dests:  []string{"*"},
			expect: true,
		},
		{
			label:  "private range is not the internet",
			dests:  []string{"10.0.0.0/8"},
			expect: false,
		},
		{
			label:  "CGNAT range is not the internet",
			dests:  []string{"100.64.0.0/10"},
			expect: false,
		},
		{
			label:  "single public IP is not the internet",
			dests:  []string{"8.8.8.8"},
			expect: false,
		},
		{
			label:  "empty dests is not the internet",
			dests:  []string{},
			expect: false,
		},
		{
			label: "multiple private ranges are not the internet",
			dests: []string{
				"10.0.0.0/8",
				"172.16.0.0/12",
				"192.168.0.0/16",
			},
			expect: false,
		},
		{
			label:  "all IPv4 combined with subnet is the internet",
			dests:  []string{"0.0.0.0/0", "10.33.0.0/16"},
			expect: true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			t.Parallel()

			match := MatchFromStrings(nil, tc.dests)
			assert.Equal(t, tc.expect, match.DestsIsTheInternet(),
				"DestsIsTheInternet() for dsts=%v", tc.dests)
		})
	}
}
// TestDebugString verifies that DebugString includes the section
// headers and both configured prefixes in its output.
func TestDebugString(t *testing.T) {
	t.Parallel()

	match := MatchFromStrings(
		[]string{"10.0.0.0/8"},
		[]string{"192.168.1.0/24"},
	)

	out := match.DebugString()
	for _, want := range []string{
		"Match:",
		"Sources:",
		"Destinations:",
		"10.0.0.0/8",
		"192.168.1.0/24",
	} {
		assert.Contains(t, out, want)
	}
}

View File

@@ -1,22 +1,23 @@
// This file implements a data-driven test runner for ACL compatibility tests.
// It loads JSON golden files from testdata/acl_results/ACL-*.json and compares
// headscale's ACL engine output against the expected packet filter rules.
// It loads HuJSON golden files from testdata/acl_results/acl-*.hujson and
// compares headscale's ACL engine output against the expected packet filter
// rules captured from Tailscale SaaS by the tscap tool.
//
// The JSON files were converted from the original inline Go struct test cases
// in tailscale_acl_compat_test.go. Each file contains:
// - A full policy (groups, tagOwners, hosts, acls)
// - Expected packet_filter_rules per node (5 nodes)
// - Or an error response for invalid policies
// Each file is a testcapture.Capture containing:
// - The full policy that was POSTed to the Tailscale SaaS API
// - The 8-node topology used for the capture run
// - Expected packet_filter_rules per node (or error metadata for
// scenarios that the SaaS rejected)
//
// Test data source: testdata/acl_results/ACL-*.json
// Original source: Tailscale SaaS API captures + headscale-generated expansions
// Test data source: testdata/acl_results/acl-*.hujson
// Source format: github.com/juanfont/headscale/hscontrol/types/testcapture
package v2
import (
"encoding/json"
"fmt"
"net/netip"
"os"
"path/filepath"
"strings"
"testing"
@@ -25,9 +26,8 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/assert"
"github.com/juanfont/headscale/hscontrol/types/testcapture"
"github.com/stretchr/testify/require"
"github.com/tailscale/hujson"
"gorm.io/gorm"
"tailscale.com/tailcfg"
)
@@ -40,55 +40,56 @@ func ptrAddr(s string) *netip.Addr {
}
// setupACLCompatUsers returns the 3 test users for ACL compatibility tests.
// Email addresses use @example.com domain, matching the converted Tailscale
// policy format (Tailscale uses @passkey and @dalby.cc).
// Names and emails match the anonymized identifiers tscap writes into the
// capture files (see github.com/kradalby/tscap/anonymize): users get
// norse-god names and nodes get original-151 pokémon names.
func setupACLCompatUsers() types.Users {
return types.Users{
{Model: gorm.Model{ID: 1}, Name: "kratail2tid", Email: "kratail2tid@example.com"},
{Model: gorm.Model{ID: 2}, Name: "kristoffer", Email: "kristoffer@example.com"},
{Model: gorm.Model{ID: 3}, Name: "monitorpasskeykradalby", Email: "monitorpasskeykradalby@example.com"},
{Model: gorm.Model{ID: 1}, Name: "odin", Email: "odin@example.com"},
{Model: gorm.Model{ID: 2}, Name: "thor", Email: "thor@example.org"},
{Model: gorm.Model{ID: 3}, Name: "freya", Email: "freya@example.com"},
}
}
// setupACLCompatNodes returns the 8 test nodes for ACL compatibility tests.
// Uses the same topology as the grants compat tests.
// Node GivenNames match tscap's anonymized pokémon naming.
func setupACLCompatNodes(users types.Users) types.Nodes {
return types.Nodes{
{
ID: 1, GivenName: "user1",
ID: 1, GivenName: "bulbasaur",
User: &users[0], UserID: &users[0].ID,
IPv4: ptrAddr("100.90.199.68"), IPv6: ptrAddr("fd7a:115c:a1e0::2d01:c747"),
Hostinfo: &tailcfg.Hostinfo{},
},
{
ID: 2, GivenName: "user-kris",
ID: 2, GivenName: "ivysaur",
User: &users[1], UserID: &users[1].ID,
IPv4: ptrAddr("100.110.121.96"), IPv6: ptrAddr("fd7a:115c:a1e0::1737:7960"),
Hostinfo: &tailcfg.Hostinfo{},
},
{
ID: 3, GivenName: "user-mon",
ID: 3, GivenName: "venusaur",
User: &users[2], UserID: &users[2].ID,
IPv4: ptrAddr("100.103.90.82"), IPv6: ptrAddr("fd7a:115c:a1e0::9e37:5a52"),
Hostinfo: &tailcfg.Hostinfo{},
},
{
ID: 4, GivenName: "tagged-server",
ID: 4, GivenName: "beedrill",
IPv4: ptrAddr("100.108.74.26"), IPv6: ptrAddr("fd7a:115c:a1e0::b901:4a87"),
Tags: []string{"tag:server"}, Hostinfo: &tailcfg.Hostinfo{},
},
{
ID: 5, GivenName: "tagged-prod",
ID: 5, GivenName: "kakuna",
IPv4: ptrAddr("100.103.8.15"), IPv6: ptrAddr("fd7a:115c:a1e0::5b37:80f"),
Tags: []string{"tag:prod"}, Hostinfo: &tailcfg.Hostinfo{},
},
{
ID: 6, GivenName: "tagged-client",
ID: 6, GivenName: "weedle",
IPv4: ptrAddr("100.83.200.69"), IPv6: ptrAddr("fd7a:115c:a1e0::c537:c845"),
Tags: []string{"tag:client"}, Hostinfo: &tailcfg.Hostinfo{},
},
{
ID: 7, GivenName: "subnet-router",
ID: 7, GivenName: "squirtle",
IPv4: ptrAddr("100.92.142.61"), IPv6: ptrAddr("fd7a:115c:a1e0::3e37:8e3d"),
Tags: []string{"tag:router"},
Hostinfo: &tailcfg.Hostinfo{
@@ -97,7 +98,7 @@ func setupACLCompatNodes(users types.Users) types.Nodes {
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
},
{
ID: 8, GivenName: "exit-node",
ID: 8, GivenName: "charmander",
IPv4: ptrAddr("100.85.66.106"), IPv6: ptrAddr("fd7a:115c:a1e0::7c37:426a"),
Tags: []string{"tag:exit"}, Hostinfo: &tailcfg.Hostinfo{},
},
@@ -140,40 +141,21 @@ func cmpOptions() []cmp.Option {
return a.Bits() < b.Bits()
}),
// Compare json.RawMessage semantically rather than by exact
// bytes to handle indentation differences between the policy
// source and the golden capture data.
cmp.Comparer(func(a, b json.RawMessage) bool {
var va, vb any
err := json.Unmarshal(a, &va)
if err != nil {
return string(a) == string(b)
}
err = json.Unmarshal(b, &vb)
if err != nil {
return string(a) == string(b)
}
ja, _ := json.Marshal(va)
jb, _ := json.Marshal(vb)
return string(ja) == string(jb)
}),
// Compare tailcfg.RawMessage semantically (it's a string type
// containing JSON) to handle indentation differences.
// containing JSON) to handle indentation differences. Both
// sides must be valid JSON — golden data parse failures are
// always errors.
cmp.Comparer(func(a, b tailcfg.RawMessage) bool {
var va, vb any
err := json.Unmarshal([]byte(a), &va)
if err != nil {
return a == b
panic(fmt.Sprintf("golden RawMessage A unparseable: %v", err))
}
err = json.Unmarshal([]byte(b), &vb)
if err != nil {
return a == b
panic(fmt.Sprintf("golden RawMessage B unparseable: %v", err))
}
ja, _ := json.Marshal(va)
@@ -184,69 +166,94 @@ func cmpOptions() []cmp.Option {
}
}
// aclTestFile represents the JSON structure of a captured ACL test file.
type aclTestFile struct {
TestID string `json:"test_id"`
Source string `json:"source"` // "tailscale_saas" or "headscale_adapted"
Error bool `json:"error"`
HeadscaleDiffers bool `json:"headscale_differs"`
ParentTest string `json:"parent_test"`
Input struct {
FullPolicy json.RawMessage `json:"full_policy"`
APIResponseCode int `json:"api_response_code"`
APIResponseBody *struct {
Message string `json:"message"`
} `json:"api_response_body"`
} `json:"input"`
Topology struct {
Nodes map[string]struct {
Hostname string `json:"hostname"`
Tags []string `json:"tags"`
IPv4 string `json:"ipv4"`
IPv6 string `json:"ipv6"`
User string `json:"user"`
RoutableIPs []string `json:"routable_ips"`
ApprovedRoutes []string `json:"approved_routes"`
} `json:"nodes"`
} `json:"topology"`
Captures map[string]struct {
PacketFilterRules json.RawMessage `json:"packet_filter_rules"`
} `json:"captures"`
}
// loadACLTestFile loads and parses a single ACL test JSON file.
func loadACLTestFile(t *testing.T, path string) aclTestFile {
// buildACLUsersAndNodes constructs users and nodes from an ACL
// golden file's topology. This ensures the test creates the same
// nodes that were present during the Tailscale SaaS capture.
func buildACLUsersAndNodes(
t *testing.T,
tf *testcapture.Capture,
) (types.Users, types.Nodes) {
t.Helper()
content, err := os.ReadFile(path)
require.NoError(t, err, "failed to read test file %s", path)
users := setupACLCompatUsers()
nodes := make(types.Nodes, 0, len(tf.Topology.Nodes))
autoID := 1
ast, err := hujson.Parse(content)
require.NoError(t, err, "failed to parse HuJSON in %s", path)
ast.Standardize()
for name, nodeDef := range tf.Topology.Nodes {
node := &types.Node{
ID: types.NodeID(autoID), //nolint:gosec
GivenName: name,
IPv4: ptrAddr(nodeDef.IPv4),
IPv6: ptrAddr(nodeDef.IPv6),
Tags: nodeDef.Tags,
}
autoID++
var tf aclTestFile
hostinfo := &tailcfg.Hostinfo{}
err = json.Unmarshal(ast.Pack(), &tf)
require.NoError(t, err, "failed to unmarshal test file %s", path)
if len(nodeDef.RoutableIPs) > 0 {
routableIPs := make(
[]netip.Prefix, 0, len(nodeDef.RoutableIPs),
)
return tf
for _, r := range nodeDef.RoutableIPs {
routableIPs = append(
routableIPs, netip.MustParsePrefix(r),
)
}
hostinfo.RoutableIPs = routableIPs
}
node.Hostinfo = hostinfo
if len(nodeDef.ApprovedRoutes) > 0 {
approved := make(
[]netip.Prefix, 0, len(nodeDef.ApprovedRoutes),
)
for _, r := range nodeDef.ApprovedRoutes {
approved = append(
approved, netip.MustParsePrefix(r),
)
}
node.ApprovedRoutes = approved
} else {
node.ApprovedRoutes = []netip.Prefix{}
}
// Assign user — untagged nodes get user1
if len(nodeDef.Tags) == 0 {
if nodeDef.User != "" {
for i := range users {
if users[i].Name == nodeDef.User {
node.User = &users[i]
node.UserID = &users[i].ID
break
}
}
} else {
node.User = &users[0]
node.UserID = &users[0].ID
}
}
nodes = append(nodes, node)
}
return users, nodes
}
// aclSkipReasons documents WHY tests are expected to fail and WHAT needs to be
// implemented to fix them. Tests are grouped by root cause.
//
// Impact summary:
//
// SRCIPS_FORMAT - tests: SrcIPs use adapted format (100.64.0.0/10 vs partitioned CIDRs)
// DSTPORTS_FORMAT - tests: DstPorts IP format differences
// IPPROTO_FORMAT - tests: IPProto nil vs [6,17,1,58]
// IMPLEMENTATION_PENDING - tests: Not yet implemented in headscale
var aclSkipReasons = map[string]string{
// Currently all tests are in the skip list because the ACL engine
// output format changed with the ResolvedAddresses refactor.
// Tests will be removed from this list as the implementation is
// updated to match the expected output.
// loadACLTestFile loads and parses a single ACL capture HuJSON file.
func loadACLTestFile(t *testing.T, path string) *testcapture.Capture {
t.Helper()
c, err := testcapture.Read(path)
require.NoError(t, err, "failed to read test file %s", path)
return c
}
// TestACLCompat is a data-driven test that loads all acl-*.hujson capture files
@@ -260,65 +267,55 @@ func TestACLCompat(t *testing.T) {
t.Parallel()
files, err := filepath.Glob(
filepath.Join("testdata", "acl_results", "ACL-*.hujson"),
filepath.Join("testdata", "acl_results", "acl-*.hujson"),
)
require.NoError(t, err, "failed to glob test files")
require.NotEmpty(
t,
files,
"no ACL-*.hujson test files found in testdata/acl_results/",
"no acl-*.hujson test files found in testdata/acl_results/",
)
t.Logf("Loaded %d ACL test files", len(files))
users := setupACLCompatUsers()
nodes := setupACLCompatNodes(users)
for _, file := range files {
tf := loadACLTestFile(t, file)
t.Run(tf.TestID, func(t *testing.T) {
t.Parallel()
// Check skip list
if reason, ok := aclSkipReasons[tf.TestID]; ok {
t.Skipf(
"TODO: %s — see aclSkipReasons for details",
reason,
)
return
}
if tf.Error {
testACLError(t, tf)
return
}
// Build nodes per-scenario from this file's topology.
// tscap uses clean-slate mode, so each scenario has
// different node IPs; using a shared topology would
// cause IP mismatches in filter rule comparisons.
users, nodes := buildACLUsersAndNodes(t, tf)
require.NotEmpty(t, nodes, "%s: topology is empty", tf.TestID)
testACLSuccess(t, tf, users, nodes)
})
}
}
// testACLError verifies that an invalid policy produces the expected error.
func testACLError(t *testing.T, tf aclTestFile) {
func testACLError(t *testing.T, tf *testcapture.Capture) {
t.Helper()
policyJSON := convertPolicyUserEmails(tf.Input.FullPolicy)
pol, err := unmarshalPolicy(policyJSON)
if err != nil {
// Parse-time error — valid for some error tests
// Parse-time error.
if tf.Input.APIResponseBody != nil {
wantMsg := tf.Input.APIResponseBody.Message
if wantMsg != "" {
assert.Contains(
t,
err.Error(),
wantMsg,
"%s: error message should contain expected substring",
tf.TestID,
assertACLErrorContains(
t, err, wantMsg, tf.TestID,
)
}
}
@@ -331,64 +328,52 @@ func testACLError(t *testing.T, tf aclTestFile) {
if tf.Input.APIResponseBody != nil {
wantMsg := tf.Input.APIResponseBody.Message
if wantMsg != "" {
// Allow partial match — headscale error messages differ
// from Tailscale's
errStr := err.Error()
if !strings.Contains(errStr, wantMsg) {
// Try matching key parts
matched := false
for _, part := range []string{
"autogroup:self",
"not valid on the src",
"port range",
"tag not found",
"undefined",
} {
if strings.Contains(wantMsg, part) &&
strings.Contains(errStr, part) {
matched = true
break
}
}
if !matched {
t.Logf(
"%s: error message difference\n want (tailscale): %q\n got (headscale): %q",
tf.TestID,
wantMsg,
errStr,
)
}
}
assertACLErrorContains(
t, err, wantMsg, tf.TestID,
)
}
}
return
}
// For headscale_differs tests, headscale may accept what Tailscale rejects
if tf.HeadscaleDiffers {
t.Logf(
"%s: headscale accepts this policy (Tailscale rejects it)",
tf.TestID,
)
return
}
t.Errorf(
"%s: expected error but policy parsed and validated successfully",
tf.TestID,
)
}
// assertACLErrorContains requires that headscale's error contains the
// Tailscale SaaS error message verbatim. Divergence means an emitter
// needs to be aligned, not papered over with a translation table.
func assertACLErrorContains(
t *testing.T,
err error,
wantMsg string,
testID string,
) {
t.Helper()
errStr := err.Error()
if strings.Contains(errStr, wantMsg) {
return
}
t.Errorf(
"%s: error message mismatch\n"+
" want (tailscale): %q\n"+
" got (headscale): %q",
testID,
wantMsg,
errStr,
)
}
// testACLSuccess verifies that a valid policy produces the expected
// packet filter rules for each node.
func testACLSuccess(
t *testing.T,
tf aclTestFile,
tf *testcapture.Capture,
users types.Users,
nodes types.Nodes,
) {
@@ -415,9 +400,6 @@ func testACLSuccess(
for nodeName, capture := range tf.Captures {
t.Run(nodeName, func(t *testing.T) {
captureIsNull := len(capture.PacketFilterRules) == 0 ||
string(capture.PacketFilterRules) == "null" //nolint:goconst
node := findNodeByGivenName(nodes, nodeName)
if node == nil {
t.Skipf(
@@ -447,21 +429,7 @@ func testACLSuccess(
compiledRules,
)
// Parse expected rules from JSON
var wantRules []tailcfg.FilterRule
if !captureIsNull {
err = json.Unmarshal(
capture.PacketFilterRules,
&wantRules,
)
require.NoError(
t,
err,
"%s/%s: failed to unmarshal expected rules",
tf.TestID,
nodeName,
)
}
wantRules := capture.PacketFilterRules
// Compare
opts := append(

View File

@@ -1,27 +1,24 @@
// This file is "generated" by Claude.
// It contains a data-driven test that reads 237 GRANT-*.json test files
// captured from Tailscale SaaS. Each file contains:
// - A policy with grants (and optionally ACLs)
// - The expected packet_filter_rules for each of 8 test nodes
// This file implements a data-driven test runner for grant compatibility
// tests. It loads HuJSON golden files from testdata/grant_results/grant-*.hujson
// and via-grant-*.hujson, captured from Tailscale SaaS by tscap, and compares
// headscale's grants engine output against the captured packet filter rules.
//
// Each file is a testcapture.Capture containing:
// - A full policy with grants (and optionally ACLs)
// - The expected packet_filter_rules for each of 8-15 test nodes
// - Or an error response for invalid policies
//
// The test loads each JSON file, applies the policy through headscale's
// grants engine, and compares the output against Tailscale's actual behavior.
// Tests known to fail due to unimplemented features or known differences are
// skipped with a TODO comment explaining the root cause. As headscale's grants
// implementation improves, tests should be removed from the skip list.
//
// Tests that are known to fail due to unimplemented features or known
// differences are skipped with a TODO comment explaining the root cause.
// As headscale's grants implementation improves, tests should be removed
// from the skip list.
//
// Test data source: testdata/grant_results/GRANT-*.json
// Captured from: Tailscale SaaS API + tailscale debug localapi
// Test data source: testdata/grant_results/{grant,via-grant}-*.hujson
// Source format: github.com/juanfont/headscale/hscontrol/types/testcapture
package v2
import (
"encoding/json"
"net/netip"
"os"
"path/filepath"
"strings"
"testing"
@@ -30,54 +27,33 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/testcapture"
"github.com/stretchr/testify/require"
"github.com/tailscale/hujson"
"gorm.io/gorm"
"tailscale.com/tailcfg"
)
// grantTestFile represents the JSON structure of a captured grant test file.
type grantTestFile struct {
TestID string `json:"test_id"`
Error bool `json:"error"`
Input struct {
FullPolicy json.RawMessage `json:"full_policy"`
APIResponseCode int `json:"api_response_code"`
APIResponseBody *struct {
Message string `json:"message"`
} `json:"api_response_body"`
} `json:"input"`
Topology struct {
Nodes map[string]struct {
Hostname string `json:"hostname"`
Tags []string `json:"tags"`
IPv4 string `json:"ipv4"`
IPv6 string `json:"ipv6"`
} `json:"nodes"`
} `json:"topology"`
Captures map[string]struct {
PacketFilterRules json.RawMessage `json:"packet_filter_rules"`
} `json:"captures"`
}
// setupGrantsCompatUsers returns the 3 test users for grants compatibility tests.
// Email addresses use @example.com domain, matching the converted Tailscale policy format.
// Users get norse-god names; nodes get original-151 pokémon names — matching
// the anonymized identifiers tscap writes into the capture files
// (see github.com/kradalby/tscap/anonymize).
func setupGrantsCompatUsers() types.Users {
return types.Users{
{Model: gorm.Model{ID: 1}, Name: "kratail2tid", Email: "kratail2tid@example.com"},
{Model: gorm.Model{ID: 2}, Name: "kristoffer", Email: "kristoffer@example.com"},
{Model: gorm.Model{ID: 3}, Name: "monitorpasskeykradalby", Email: "monitorpasskeykradalby@example.com"},
{Model: gorm.Model{ID: 1}, Name: "odin", Email: "odin@example.com"},
{Model: gorm.Model{ID: 2}, Name: "thor", Email: "thor@example.org"},
{Model: gorm.Model{ID: 3}, Name: "freya", Email: "freya@example.com"},
}
}
// setupGrantsCompatNodes returns the 8 test nodes for grants compatibility tests.
// setupGrantsCompatNodes returns the 15 test nodes for grants compatibility tests.
// The node configuration matches the Tailscale test environment:
// - 3 user-owned nodes (user1, user-kris, user-mon)
// - 5 tagged nodes (tagged-server, tagged-prod, tagged-client, subnet-router, exit-node)
// - 3 user-owned nodes (bulbasaur, ivysaur, venusaur)
// - 12 tagged nodes (beedrill, kakuna, weedle, squirtle, charmander,
// pidgey, pidgeotto, rattata, raticate, spearow, fearow, blastoise)
func setupGrantsCompatNodes(users types.Users) types.Nodes {
nodeUser1 := &types.Node{
nodeBulbasaur := &types.Node{
ID: 1,
GivenName: "user1",
GivenName: "bulbasaur",
User: &users[0],
UserID: &users[0].ID,
IPv4: ptrAddr("100.90.199.68"),
@@ -85,9 +61,9 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
Hostinfo: &tailcfg.Hostinfo{},
}
nodeUserKris := &types.Node{
nodeIvysaur := &types.Node{
ID: 2,
GivenName: "user-kris",
GivenName: "ivysaur",
User: &users[1],
UserID: &users[1].ID,
IPv4: ptrAddr("100.110.121.96"),
@@ -95,9 +71,9 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
Hostinfo: &tailcfg.Hostinfo{},
}
nodeUserMon := &types.Node{
nodeVenusaur := &types.Node{
ID: 3,
GivenName: "user-mon",
GivenName: "venusaur",
User: &users[2],
UserID: &users[2].ID,
IPv4: ptrAddr("100.103.90.82"),
@@ -105,36 +81,36 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
Hostinfo: &tailcfg.Hostinfo{},
}
nodeTaggedServer := &types.Node{
nodeBeedrill := &types.Node{
ID: 4,
GivenName: "tagged-server",
GivenName: "beedrill",
IPv4: ptrAddr("100.108.74.26"),
IPv6: ptrAddr("fd7a:115c:a1e0::b901:4a87"),
Tags: []string{"tag:server"},
Hostinfo: &tailcfg.Hostinfo{},
}
nodeTaggedProd := &types.Node{
nodeKakuna := &types.Node{
ID: 5,
GivenName: "tagged-prod",
GivenName: "kakuna",
IPv4: ptrAddr("100.103.8.15"),
IPv6: ptrAddr("fd7a:115c:a1e0::5b37:80f"),
Tags: []string{"tag:prod"},
Hostinfo: &tailcfg.Hostinfo{},
}
nodeTaggedClient := &types.Node{
nodeWeedle := &types.Node{
ID: 6,
GivenName: "tagged-client",
GivenName: "weedle",
IPv4: ptrAddr("100.83.200.69"),
IPv6: ptrAddr("fd7a:115c:a1e0::c537:c845"),
Tags: []string{"tag:client"},
Hostinfo: &tailcfg.Hostinfo{},
}
nodeSubnetRouter := &types.Node{
nodeSquirtle := &types.Node{
ID: 7,
GivenName: "subnet-router",
GivenName: "squirtle",
IPv4: ptrAddr("100.92.142.61"),
IPv6: ptrAddr("fd7a:115c:a1e0::3e37:8e3d"),
Tags: []string{"tag:router"},
@@ -144,9 +120,9 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
}
nodeExitNode := &types.Node{
nodeCharmander := &types.Node{
ID: 8,
GivenName: "exit-node",
GivenName: "charmander",
IPv4: ptrAddr("100.85.66.106"),
IPv6: ptrAddr("fd7a:115c:a1e0::7c37:426a"),
Tags: []string{"tag:exit"},
@@ -164,9 +140,9 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
// --- New nodes for expanded via grant topology ---
nodeExitA := &types.Node{
nodePidgey := &types.Node{
ID: 9,
GivenName: "exit-a",
GivenName: "pidgey",
IPv4: ptrAddr("100.124.195.93"),
IPv6: ptrAddr("fd7a:115c:a1e0::7837:c35d"),
Tags: []string{"tag:exit-a"},
@@ -182,9 +158,9 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
},
}
nodeExitB := &types.Node{
nodePidgeotto := &types.Node{
ID: 10,
GivenName: "exit-b",
GivenName: "pidgeotto",
IPv4: ptrAddr("100.116.18.24"),
IPv6: ptrAddr("fd7a:115c:a1e0::ff37:1218"),
Tags: []string{"tag:exit-b"},
@@ -200,27 +176,27 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
},
}
nodeGroupA := &types.Node{
nodeRattata := &types.Node{
ID: 11,
GivenName: "group-a-client",
GivenName: "rattata",
IPv4: ptrAddr("100.107.162.14"),
IPv6: ptrAddr("fd7a:115c:a1e0::a237:a20e"),
Tags: []string{"tag:group-a"},
Hostinfo: &tailcfg.Hostinfo{},
}
nodeGroupB := &types.Node{
nodeRaticate := &types.Node{
ID: 12,
GivenName: "group-b-client",
GivenName: "raticate",
IPv4: ptrAddr("100.77.135.18"),
IPv6: ptrAddr("fd7a:115c:a1e0::4b37:8712"),
Tags: []string{"tag:group-b"},
Hostinfo: &tailcfg.Hostinfo{},
}
nodeRouterA := &types.Node{
nodeSpearow := &types.Node{
ID: 13,
GivenName: "router-a",
GivenName: "spearow",
IPv4: ptrAddr("100.109.43.124"),
IPv6: ptrAddr("fd7a:115c:a1e0::a537:2b7c"),
Tags: []string{"tag:router-a"},
@@ -230,9 +206,9 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.44.0.0/16")},
}
nodeRouterB := &types.Node{
nodeFearow := &types.Node{
ID: 14,
GivenName: "router-b",
GivenName: "fearow",
IPv4: ptrAddr("100.65.172.123"),
IPv6: ptrAddr("fd7a:115c:a1e0::5a37:ac7c"),
Tags: []string{"tag:router-b"},
@@ -242,9 +218,9 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.55.0.0/16")},
}
nodeMultiExitRouter := &types.Node{
nodeBlastoise := &types.Node{
ID: 15,
GivenName: "multi-exit-router",
GivenName: "blastoise",
IPv4: ptrAddr("100.105.127.107"),
IPv6: ptrAddr("fd7a:115c:a1e0::9537:7f6b"),
Tags: []string{"tag:exit", "tag:router"},
@@ -263,21 +239,21 @@ func setupGrantsCompatNodes(users types.Users) types.Nodes {
}
return types.Nodes{
nodeUser1,
nodeUserKris,
nodeUserMon,
nodeTaggedServer,
nodeTaggedProd,
nodeTaggedClient,
nodeSubnetRouter,
nodeExitNode,
nodeExitA,
nodeExitB,
nodeGroupA,
nodeGroupB,
nodeRouterA,
nodeRouterB,
nodeMultiExitRouter,
nodeBulbasaur,
nodeIvysaur,
nodeVenusaur,
nodeBeedrill,
nodeKakuna,
nodeWeedle,
nodeSquirtle,
nodeCharmander,
nodePidgey,
nodePidgeotto,
nodeRattata,
nodeRaticate,
nodeSpearow,
nodeFearow,
nodeBlastoise,
}
}
@@ -292,41 +268,87 @@ func findGrantsNode(nodes types.Nodes, name string) *types.Node {
return nil
}
// convertPolicyUserEmails converts Tailscale SaaS user email formats to
// headscale-compatible @example.com format in the raw policy JSON.
//
// Tailscale uses provider-specific email formats:
// - kratail2tid@passkey (passkey auth)
// - kristoffer@dalby.cc (email auth)
// - monitorpasskeykradalby@passkey (passkey auth)
//
// Headscale resolves users by Email field, so we convert all to @example.com.
func convertPolicyUserEmails(policyJSON []byte) []byte {
s := string(policyJSON)
s = strings.ReplaceAll(s, "kratail2tid@passkey", "kratail2tid@example.com")
s = strings.ReplaceAll(s, "kristoffer@dalby.cc", "kristoffer@example.com")
s = strings.ReplaceAll(s, "monitorpasskeykradalby@passkey", "monitorpasskeykradalby@example.com")
// buildGrantsNodesFromCapture constructs types.Nodes from a capture's
// topology section. Each scenario in tscap uses clean-slate mode, so
// node IPs differ between scenarios; this builds the node set with
// the IPs that were actually present during that capture.
func buildGrantsNodesFromCapture(
users types.Users,
tf *testcapture.Capture,
) types.Nodes {
nodes := make(types.Nodes, 0, len(tf.Topology.Nodes))
autoID := 1
return []byte(s)
for _, nodeDef := range tf.Topology.Nodes {
node := &types.Node{
ID: types.NodeID(autoID), //nolint:gosec
GivenName: nodeDef.Hostname,
IPv4: ptrAddr(nodeDef.IPv4),
IPv6: ptrAddr(nodeDef.IPv6),
Tags: nodeDef.Tags,
}
autoID++
hostinfo := &tailcfg.Hostinfo{}
if len(nodeDef.RoutableIPs) > 0 {
routableIPs := make([]netip.Prefix, 0, len(nodeDef.RoutableIPs))
for _, r := range nodeDef.RoutableIPs {
routableIPs = append(routableIPs, netip.MustParsePrefix(r))
}
hostinfo.RoutableIPs = routableIPs
}
node.Hostinfo = hostinfo
if len(nodeDef.ApprovedRoutes) > 0 {
approved := make([]netip.Prefix, 0, len(nodeDef.ApprovedRoutes))
for _, r := range nodeDef.ApprovedRoutes {
approved = append(approved, netip.MustParsePrefix(r))
}
node.ApprovedRoutes = approved
} else {
node.ApprovedRoutes = []netip.Prefix{}
}
// Assign user — untagged nodes look up by User field.
if len(nodeDef.Tags) == 0 && nodeDef.User != "" {
for i := range users {
if users[i].Name == nodeDef.User {
node.User = &users[i]
node.UserID = &users[i].ID
break
}
}
}
nodes = append(nodes, node)
}
return nodes
}
// loadGrantTestFile loads and parses a single grant test JSON file.
func loadGrantTestFile(t *testing.T, path string) grantTestFile {
// convertPolicyUserEmails used to map SaaS-side emails to @example.com.
// tscap now anonymizes the policy JSON at write time (kratail2tid -> odin,
// kristoffer -> thor, monitorpasskeykradalby -> freya), so the captured
// FullPolicy is already in its final form and this is a passthrough that
// just adapts the captured string value to the []byte that the policy
// parser expects.
func convertPolicyUserEmails(policyJSON string) []byte {
return []byte(policyJSON)
}
// loadGrantTestFile loads and parses a single grant capture HuJSON file.
func loadGrantTestFile(t *testing.T, path string) *testcapture.Capture {
t.Helper()
content, err := os.ReadFile(path)
c, err := testcapture.Read(path)
require.NoError(t, err, "failed to read test file %s", path)
ast, err := hujson.Parse(content)
require.NoError(t, err, "failed to parse HuJSON in %s", path)
ast.Standardize()
var tf grantTestFile
err = json.Unmarshal(ast.Pack(), &tf)
require.NoError(t, err, "failed to unmarshal test file %s", path)
return tf
return c
}
// Skip categories document WHY tests are expected to differ from Tailscale SaaS.
@@ -341,8 +363,8 @@ var grantSkipReasons = map[string]string{
// Tailscale SaaS policies can use user:*@passkey as a wildcard matching
// all passkey-authenticated users. headscale does not support passkey
// authentication and has no equivalent for this wildcard pattern.
"GRANT-K20": "USER_PASSKEY_WILDCARD: src=user:*@passkey not supported in headscale",
"GRANT-K21": "USER_PASSKEY_WILDCARD: dst=user:*@passkey not supported in headscale",
"grant-k20": "USER_PASSKEY_WILDCARD: src=user:*@passkey not supported in headscale",
"grant-k21": "USER_PASSKEY_WILDCARD: dst=user:*@passkey not supported in headscale",
}
// TestGrantsCompat is a data-driven test that loads all GRANT-*.json
@@ -354,22 +376,21 @@ var grantSkipReasons = map[string]string{
// - For success cases: expected packet_filter_rules per node
// - For error cases: expected error message
//
// The test converts Tailscale user email formats (@passkey, @dalby.cc) to
// headscale format (@example.com) and runs the policy through unmarshalPolicy,
// The test converts Tailscale user email formats to headscale format
// (@example.com, @example.org) and runs the policy through unmarshalPolicy,
// validate, compileFilterRulesForNode, and ReduceFilterRules.
//
// 2 tests are skipped for user:*@passkey wildcard (not supported in headscale).
func TestGrantsCompat(t *testing.T) {
t.Parallel()
files, err := filepath.Glob(filepath.Join("testdata", "grant_results", "GRANT-*.hujson"))
files, err := filepath.Glob(filepath.Join("testdata", "grant_results", "*-*.hujson"))
require.NoError(t, err, "failed to glob test files")
require.NotEmpty(t, files, "no GRANT-*.hujson test files found in testdata/grant_results/")
require.NotEmpty(t, files, "no grant test files found in testdata/grant_results/")
t.Logf("Loaded %d grant test files", len(files))
users := setupGrantsCompatUsers()
allNodes := setupGrantsCompatNodes(users)
for _, file := range files {
tf := loadGrantTestFile(t, file)
@@ -383,17 +404,13 @@ func TestGrantsCompat(t *testing.T) {
return
}
// Determine which node set to use based on the test's topology.
// Tests captured with the expanded 15-node topology (V26+) have
// nodes like exit-a, group-a-client, etc. Tests from the original
// 8-node topology should only use the first 8 nodes to avoid
// resolving extra IPs from nodes that weren't present during capture.
nodes := allNodes
if _, hasNewNodes := tf.Captures["exit-a"]; !hasNewNodes {
nodes = allNodes[:8]
}
// Build nodes per-scenario from this file's topology.
// tscap uses clean-slate mode, so each scenario has
// different node IPs.
nodes := buildGrantsNodesFromCapture(users, tf)
// Convert Tailscale user emails to headscale @example.com format
// Use the captured full policy verbatim (anonymization
// in tscap already rewrote SaaS emails).
policyJSON := convertPolicyUserEmails(tf.Input.FullPolicy)
if tf.Input.APIResponseCode == 400 || tf.Error {
@@ -407,7 +424,7 @@ func TestGrantsCompat(t *testing.T) {
}
// testGrantError verifies that an invalid policy produces the expected error.
func testGrantError(t *testing.T, policyJSON []byte, tf grantTestFile) {
func testGrantError(t *testing.T, policyJSON []byte, tf *testcapture.Capture) {
t.Helper()
wantMsg := ""
@@ -439,91 +456,28 @@ func testGrantError(t *testing.T, policyJSON []byte, tf grantTestFile) {
tf.TestID, wantMsg)
}
// grantErrorMessageMap maps Tailscale error messages to their headscale equivalents
// where the wording differs but the meaning is the same.
var grantErrorMessageMap = map[string]string{
// Tailscale says "ip and app can not both be empty",
// headscale says "grants must specify either 'ip' or 'app' field"
"ip and app can not both be empty": "grants must specify either",
// Tailscale says "via can only be a tag",
// headscale rejects at unmarshal time via Tag.UnmarshalJSON: "tag must start with 'tag:'"
"via can only be a tag": "tag must start with",
}
// assertGrantErrorContains checks that an error message contains the expected
// Tailscale error message (or its headscale equivalent).
// assertGrantErrorContains requires that headscale's error contains
// the Tailscale SaaS error message verbatim. Divergence means an
// emitter needs to be aligned, not papered over with a translation
// table.
func assertGrantErrorContains(t *testing.T, err error, wantMsg string, testID string) {
t.Helper()
errStr := err.Error()
// First try direct substring match
if strings.Contains(errStr, wantMsg) {
return
}
// Try mapped equivalent
if mapped, ok := grantErrorMessageMap[wantMsg]; ok {
if strings.Contains(errStr, mapped) {
return
}
}
// Try matching key parts of the error message
// Extract the most distinctive part of the Tailscale message
keyParts := extractErrorKeyParts(wantMsg)
for _, part := range keyParts {
if strings.Contains(errStr, part) {
return
}
}
t.Errorf("%s: error message mismatch\n tailscale wants: %q\n headscale got: %q",
testID, wantMsg, errStr)
}
// extractErrorKeyParts extracts distinctive substrings from an error message
// that should appear in any equivalent error message.
func extractErrorKeyParts(msg string) []string {
var parts []string
// Common patterns to extract
if strings.Contains(msg, "tag:") {
// Extract tag references like tag:nonexistent
for word := range strings.FieldsSeq(msg) {
word = strings.Trim(word, `"'`)
if strings.HasPrefix(word, "tag:") {
parts = append(parts, word)
}
}
}
if strings.Contains(msg, "autogroup:") {
for word := range strings.FieldsSeq(msg) {
word = strings.Trim(word, `"'`)
if strings.HasPrefix(word, "autogroup:") {
parts = append(parts, word)
}
}
}
if strings.Contains(msg, "capability name") {
parts = append(parts, "capability")
}
if strings.Contains(msg, "port range") {
parts = append(parts, "port")
}
return parts
}
// testGrantSuccess verifies that a valid policy produces the expected
// packet filter rules for each node.
func testGrantSuccess(
t *testing.T,
policyJSON []byte,
tf grantTestFile,
tf *testcapture.Capture,
users types.Users,
nodes types.Nodes,
) {
@@ -537,37 +491,9 @@ func testGrantSuccess(
for nodeName, capture := range tf.Captures {
t.Run(nodeName, func(t *testing.T) {
// Check if this node was offline during capture.
// tagged-prod was frequently offline (132 of 188 success tests).
// When offline, packet_filter_rules is null and topology shows
// hostname="unknown" with empty tags.
captureIsNull := len(capture.PacketFilterRules) == 0 ||
string(capture.PacketFilterRules) == "null"
if captureIsNull {
topoNode, exists := tf.Topology.Nodes[nodeName]
if exists && (topoNode.Hostname == "unknown" || topoNode.Hostname == "") {
t.Skipf(
"node %s was offline during Tailscale capture (hostname=%q)",
nodeName,
topoNode.Hostname,
)
return
}
// Node was online but has null/empty rules — means Tailscale
// produced no rules. headscale should also produce no rules.
}
node := findGrantsNode(nodes, nodeName)
if node == nil {
t.Skipf(
"node %s not found in test setup (may be a test-specific node)",
nodeName,
)
return
}
require.NotNilf(t, node,
"golden node %s not found in test setup", nodeName)
// Compile headscale filter rules for this node
gotRules, err := pol.compileFilterRulesForNode(
@@ -585,21 +511,7 @@ func testGrantSuccess(
gotRules = policyutil.ReduceFilterRules(node.View(), gotRules)
// Unmarshal Tailscale expected rules from JSON capture
var wantRules []tailcfg.FilterRule
if !captureIsNull {
err = json.Unmarshal(
[]byte(capture.PacketFilterRules),
&wantRules,
)
require.NoError(
t,
err,
"%s/%s: failed to unmarshal expected rules from JSON",
tf.TestID,
nodeName,
)
}
wantRules := capture.PacketFilterRules
// Compare headscale output against Tailscale expected output.
// The diff labels show (-tailscale +headscale) to make clear

File diff suppressed because it is too large Load Diff

View File

@@ -1,26 +1,22 @@
// This file is "generated" by Claude.
// It contains a data-driven test that reads SSH-*.json test files captured
// from Tailscale SaaS. Each file contains:
// - The SSH section of the policy
// - The expected SSHPolicy rules for each of 5 test nodes
// This file implements a data-driven test runner for SSH compatibility tests.
// It loads HuJSON golden files from testdata/ssh_results/ssh-*.hujson, captured
// from Tailscale SaaS by tscap, and compares headscale's SSH policy compilation
// against the captured SSH rules.
//
// The test loads each JSON file, constructs a full policy from the SSH section,
// applies it through headscale's SSH policy compilation, and compares the output
// against Tailscale's actual behavior.
// Each file is a testcapture.Capture containing:
// - The full policy that was POSTed to Tailscale SaaS (we use tf.Input.FullPolicy
// directly instead of reconstructing it from a sub-section)
// - The expected SSH rules for each of the 8 test nodes (in tf.Captures[name].SSHRules)
//
// Tests that are known to fail due to unimplemented features or known
// differences are skipped with a TODO comment explaining the root cause.
// As headscale's SSH implementation improves, tests should be removed
// from the skip list.
// Tests known to fail due to unimplemented features or known differences are
// skipped with a TODO comment explaining the root cause.
//
// Test data source: testdata/ssh_results/SSH-*.json
// Captured from: Tailscale SaaS API + tailscale debug localapi
// Test data source: testdata/ssh_results/ssh-*.hujson
// Source format: github.com/juanfont/headscale/hscontrol/types/testcapture
package v2
import (
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
@@ -28,63 +24,55 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/testcapture"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tailscale/hujson"
"gorm.io/gorm"
"tailscale.com/tailcfg"
)
// sshTestFile represents the JSON structure of a captured SSH test file.
type sshTestFile struct {
TestID string `json:"test_id"`
PolicyFile string `json:"policy_file"`
SSHSection json.RawMessage `json:"ssh_section"`
Nodes map[string]sshNodeCapture `json:"nodes"`
}
// sshNodeCapture represents the expected SSH rules for a single node.
type sshNodeCapture struct {
Rules json.RawMessage `json:"rules"`
}
// setupSSHDataCompatUsers returns the 3 test users for SSH data-driven
// compatibility tests. The user configuration matches the Tailscale test
// environment with email domains preserved for localpart matching:
// - kratail2tid@example.com (converted from @passkey)
// - kristoffer@dalby.cc (kept as-is — different domain for localpart exclusion)
// - monitorpasskeykradalby@example.com (converted from @passkey)
// compatibility tests. Users get norse-god names; nodes get original-151
// pokémon names — matching the anonymized identifiers tscap writes into
// the capture files (see github.com/kradalby/tscap/anonymize).
//
// odin and freya live on @example.com; thor lives on @example.org so
// that "localpart:*@example.com" resolves to exactly two users
// (matching SaaS output) and the "user on a different email domain"
// case stays covered by scenarios like ssh-d1 that use
// "localpart:*@example.org".
func setupSSHDataCompatUsers() types.Users {
return types.Users{
{
Model: gorm.Model{ID: 1},
Name: "kratail2tid",
Email: "kratail2tid@example.com",
Name: "odin",
Email: "odin@example.com",
},
{
Model: gorm.Model{ID: 2},
Name: "kristoffer",
Email: "kristoffer@dalby.cc",
Name: "thor",
Email: "thor@example.org",
},
{
Model: gorm.Model{ID: 3},
Name: "monitorpasskeykradalby",
Email: "monitorpasskeykradalby@example.com",
Name: "freya",
Email: "freya@example.com",
},
}
}
// setupSSHDataCompatNodes returns the 5 test nodes for SSH data-driven
// compatibility tests. Node GivenNames match the keys in the JSON files:
// - user1 (owned by kratail2tid)
// - user-kris (owned by kristoffer)
// - user-mon (owned by monitorpasskeykradalby)
// - tagged-server (tag:server)
// - tagged-prod (tag:prod)
// setupSSHDataCompatNodes returns the test nodes for SSH data-driven
// compatibility tests. Node GivenNames match the anonymized pokémon names:
// - bulbasaur (owned by odin)
// - ivysaur (owned by thor)
// - venusaur (owned by freya)
// - beedrill (tag:server)
// - kakuna (tag:prod)
func setupSSHDataCompatNodes(users types.Users) types.Nodes {
return types.Nodes{
&types.Node{
ID: 1,
GivenName: "user1",
GivenName: "bulbasaur",
User: &users[0],
UserID: &users[0].ID,
IPv4: ptrAddr("100.90.199.68"),
@@ -93,7 +81,7 @@ func setupSSHDataCompatNodes(users types.Users) types.Nodes {
},
&types.Node{
ID: 2,
GivenName: "user-kris",
GivenName: "ivysaur",
User: &users[1],
UserID: &users[1].ID,
IPv4: ptrAddr("100.110.121.96"),
@@ -102,7 +90,7 @@ func setupSSHDataCompatNodes(users types.Users) types.Nodes {
},
&types.Node{
ID: 3,
GivenName: "user-mon",
GivenName: "venusaur",
User: &users[2],
UserID: &users[2].ID,
IPv4: ptrAddr("100.103.90.82"),
@@ -111,7 +99,7 @@ func setupSSHDataCompatNodes(users types.Users) types.Nodes {
},
&types.Node{
ID: 4,
GivenName: "tagged-server",
GivenName: "beedrill",
IPv4: ptrAddr("100.108.74.26"),
IPv6: ptrAddr("fd7a:115c:a1e0::b901:4a87"),
Tags: []string{"tag:server"},
@@ -119,7 +107,7 @@ func setupSSHDataCompatNodes(users types.Users) types.Nodes {
},
&types.Node{
ID: 5,
GivenName: "tagged-prod",
GivenName: "kakuna",
IPv4: ptrAddr("100.103.8.15"),
IPv6: ptrAddr("fd7a:115c:a1e0::5b37:80f"),
Tags: []string{"tag:prod"},
@@ -128,122 +116,65 @@ func setupSSHDataCompatNodes(users types.Users) types.Nodes {
}
}
// convertSSHPolicyEmails converts Tailscale SaaS email domains to
// headscale-compatible format in the raw policy JSON.
//
// Tailscale uses provider-specific email formats:
// - kratail2tid@passkey (passkey auth)
// - kristoffer@dalby.cc (email auth — kept as-is)
// - monitorpasskeykradalby@passkey (passkey auth)
//
// The @passkey domain is converted to @example.com. The @dalby.cc domain
// is kept as-is to preserve localpart matching semantics (kristoffer should
// NOT match localpart:*@example.com, just as it doesn't match
// localpart:*@passkey in Tailscale SaaS).
func convertSSHPolicyEmails(s string) string {
s = strings.ReplaceAll(s, "@passkey", "@example.com")
return s
}
// constructSSHFullPolicy builds a complete headscale policy from the
// ssh_section captured from Tailscale SaaS.
//
// The base policy includes:
// - groups matching the Tailscale test environment
// - tagOwners for tag:server and tag:prod
// - A permissive ACL allowing all traffic (matches the grants wildcard
// in the original Tailscale policy)
// - The SSH section from the test file
func constructSSHFullPolicy(sshSection json.RawMessage) string {
// Base policy template with groups, tagOwners, and ACLs
// User references match the converted email addresses.
const basePolicyPrefix = `{
"groups": {
"group:admins": ["kratail2tid@example.com"],
"group:developers": ["kristoffer@dalby.cc", "kratail2tid@example.com"],
"group:empty": []
},
"tagOwners": {
"tag:server": ["kratail2tid@example.com"],
"tag:prod": ["kratail2tid@example.com"]
},
"acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]`
// Handle null or empty SSH section
if len(sshSection) == 0 || string(sshSection) == "null" {
// No SSH section at all (like SSH-E4)
return basePolicyPrefix + "\n}"
}
sshStr := string(sshSection)
// Convert Tailscale email domains
sshStr = convertSSHPolicyEmails(sshStr)
return basePolicyPrefix + `,
"ssh": ` + sshStr + "\n}"
}
// loadSSHTestFile loads and parses a single SSH test JSON file.
func loadSSHTestFile(t *testing.T, path string) sshTestFile {
// loadSSHTestFile loads and parses a single SSH capture HuJSON file.
func loadSSHTestFile(t *testing.T, path string) *testcapture.Capture {
t.Helper()
content, err := os.ReadFile(path)
c, err := testcapture.Read(path)
require.NoError(t, err, "failed to read test file %s", path)
ast, err := hujson.Parse(content)
require.NoError(t, err, "failed to parse HuJSON in %s", path)
ast.Standardize()
var tf sshTestFile
err = json.Unmarshal(ast.Pack(), &tf)
require.NoError(t, err, "failed to unmarshal test file %s", path)
return tf
return c
}
// sshSkipReasons documents why each skipped test fails and what needs to be
// fixed. Tests are grouped by root cause to identify high-impact changes.
//
// 37 of 39 tests are expected to pass.
var sshSkipReasons = map[string]string{
// user:*@passkey wildcard pattern not supported in headscale.
// USER_PASSKEY_WILDCARD (2 tests)
//
// headscale does not support passkey authentication and has no
// equivalent for this wildcard pattern.
"SSH-B5": "user:*@passkey wildcard not supported in headscale",
"SSH-D10": "user:*@passkey wildcard not supported in headscale",
// equivalent for the user:*@passkey wildcard pattern.
"ssh-b5": "user:*@passkey wildcard not supported in headscale",
"ssh-d10": "user:*@passkey wildcard not supported in headscale",
// DOMAIN_NOT_ASSOCIATED (4 tests)
//
// SaaS validates that email domains in user:*@domain and
// localpart:*@domain expressions are configured tailnet domains.
// headscale has no concept of "associated tailnet domains" — it
// only has users with email addresses. These policies are
// legitimately rejected by SaaS but not by headscale.
"ssh-b4": "domain validation: headscale has no 'associated tailnet domains' concept",
"ssh-d1": "domain validation: headscale has no 'associated tailnet domains' concept",
"ssh-e1": "domain validation: headscale has no 'associated tailnet domains' concept",
"ssh-e2": "domain validation: headscale has no 'associated tailnet domains' concept",
}
// TestSSHDataCompat is a data-driven test that loads all SSH-*.json test files
// captured from Tailscale SaaS and compares headscale's SSH policy compilation
// against the real Tailscale behavior.
// TestSSHDataCompat is a data-driven test that loads all ssh-*.hujson test
// files captured from Tailscale SaaS and compares headscale's SSH policy
// compilation against the real Tailscale behavior.
//
// Each JSON file contains:
// - The SSH section of the policy
// - Expected SSH rules per node (5 nodes)
// Each capture file contains:
// - The full policy that was POSTed to the SaaS API (Input.FullPolicy)
// - Expected SSH rules per node (Captures[name].SSHRules)
//
// The test constructs a full headscale policy from the SSH section, converts
// Tailscale user email formats to headscale format, and runs the policy
// through unmarshalPolicy and compileSSHPolicy.
// The test converts Tailscale user email formats to headscale format and runs
// the captured policy through unmarshalPolicy and compileSSHPolicy.
func TestSSHDataCompat(t *testing.T) {
t.Parallel()
files, err := filepath.Glob(
filepath.Join("testdata", "ssh_results", "SSH-*.hujson"),
filepath.Join("testdata", "ssh_results", "ssh-*.hujson"),
)
require.NoError(t, err, "failed to glob test files")
require.NotEmpty(
t,
files,
"no SSH-*.hujson test files found in testdata/ssh_results/",
"no ssh-*.hujson test files found in testdata/ssh_results/",
)
t.Logf("Loaded %d SSH test files", len(files))
users := setupSSHDataCompatUsers()
nodes := setupSSHDataCompatNodes(users)
for _, file := range files {
tf := loadSSHTestFile(t, file)
@@ -261,8 +192,21 @@ func TestSSHDataCompat(t *testing.T) {
return
}
// Construct full policy from SSH section
policyJSON := constructSSHFullPolicy(tf.SSHSection)
// SaaS rejected this policy — verify headscale also rejects it.
if tf.Error {
testSSHError(t, tf)
return
}
// Build nodes per-scenario from this file's topology.
// tscap uses clean-slate mode, so each scenario has
// different node IPs.
nodes := buildGrantsNodesFromCapture(users, tf)
// Use the captured full policy verbatim. Anonymization in
// tscap already rewrites SaaS emails to @example.com.
policyJSON := tf.Input.FullPolicy
pol, err := unmarshalPolicy([]byte(policyJSON))
require.NoError(
@@ -273,19 +217,15 @@ func TestSSHDataCompat(t *testing.T) {
policyJSON,
)
for nodeName, capture := range tf.Nodes {
for nodeName, capture := range tf.Captures {
t.Run(nodeName, func(t *testing.T) {
node := findNodeByGivenName(nodes, nodeName)
require.NotNilf(
t,
node,
"node %s not found in test setup",
nodeName,
)
require.NotNilf(t, node,
"golden node %s not found in test setup", nodeName)
// Compile headscale SSH policy for this node
gotSSH, err := pol.compileSSHPolicy(
"unused-server-url",
"https://unused",
users,
node.View(),
nodes.ViewSlice(),
@@ -298,24 +238,10 @@ func TestSSHDataCompat(t *testing.T) {
nodeName,
)
// Parse expected rules from JSON capture
var wantRules []*tailcfg.SSHRule
if len(capture.Rules) > 0 &&
string(capture.Rules) != "null" {
err = json.Unmarshal(capture.Rules, &wantRules)
require.NoError(
t,
err,
"%s/%s: failed to unmarshal expected rules",
tf.TestID,
nodeName,
)
}
// Build expected SSHPolicy from the rules
// Build expected SSHPolicy from the typed rules.
var wantSSH *tailcfg.SSHPolicy
if len(wantRules) > 0 {
wantSSH = &tailcfg.SSHPolicy{Rules: wantRules}
if len(capture.SSHRules) > 0 {
wantSSH = &tailcfg.SSHPolicy{Rules: capture.SSHRules}
}
// Normalize: treat empty-rules SSHPolicy as nil
@@ -327,6 +253,7 @@ func TestSSHDataCompat(t *testing.T) {
// EquateEmpty treats nil and empty slices as equal.
// Sort principals within rules (order doesn't matter).
// Do NOT sort rules — order matters (first-match-wins).
//
opts := cmp.Options{
cmpopts.SortSlices(func(a, b *tailcfg.SSHPrincipal) bool {
return a.NodeIP < b.NodeIP
@@ -341,8 +268,128 @@ func TestSSHDataCompat(t *testing.T) {
diff,
)
}
// Separate presence check: the fields ignored by
// the diff above must still be populated on matching
// rules. This catches regressions where headscale
// would silently drop the HoldAndDelegate URL or
// flip Accept to false while we are not looking.
if wantSSH != nil && gotSSH != nil {
for i, wantRule := range wantSSH.Rules {
if i >= len(gotSSH.Rules) {
break
}
gotRule := gotSSH.Rules[i]
if wantRule.Action == nil || gotRule.Action == nil {
continue
}
wantIsCheck := wantRule.Action.HoldAndDelegate != ""
gotIsCheck := gotRule.Action.HoldAndDelegate != ""
assert.Equalf(t, wantIsCheck, gotIsCheck,
"%s/%s rule %d: HoldAndDelegate presence mismatch",
tf.TestID, nodeName, i,
)
}
}
})
}
})
}
}
// sshErrorMessageMap maps Tailscale SaaS error substrings to headscale
// equivalents where the wording differs but the meaning is the same.
var sshErrorMessageMap = map[string]string{}
// testSSHError verifies that an invalid policy produces the expected error.
func testSSHError(t *testing.T, tf *testcapture.Capture) {
t.Helper()
policyJSON := []byte(tf.Input.FullPolicy)
pol, err := unmarshalPolicy(policyJSON)
if err != nil {
// Parse-time error.
if tf.Input.APIResponseBody != nil {
wantMsg := tf.Input.APIResponseBody.Message
if wantMsg != "" {
assertSSHErrorContains(t, err, wantMsg, tf.TestID)
}
}
return
}
err = pol.validate()
if err != nil {
if tf.Input.APIResponseBody != nil {
wantMsg := tf.Input.APIResponseBody.Message
if wantMsg != "" {
assertSSHErrorContains(t, err, wantMsg, tf.TestID)
}
}
return
}
t.Errorf(
"%s: expected error but policy parsed and validated successfully",
tf.TestID,
)
}
// assertSSHErrorContains checks that an error message matches the
// expected Tailscale SaaS message, using progressive fallbacks:
// 1. Direct substring match
// 2. Mapped equivalent from sshErrorMessageMap
// 3. Key-part extraction (tags, autogroups)
// 4. t.Errorf on no match (strict)
func assertSSHErrorContains(
t *testing.T,
err error,
wantMsg string,
testID string,
) {
t.Helper()
errStr := err.Error()
// 1. Direct substring match.
if strings.Contains(errStr, wantMsg) {
return
}
// 2. Mapped equivalent.
for tsKey, hsKey := range sshErrorMessageMap {
if strings.Contains(wantMsg, tsKey) &&
strings.Contains(errStr, hsKey) {
return
}
}
// 3. Key-part extraction.
for _, part := range []string{
"autogroup:",
"tag:",
"undefined",
"not valid",
} {
if strings.Contains(wantMsg, part) &&
strings.Contains(errStr, part) {
return
}
}
// 4. No match — strict failure.
t.Errorf(
"%s: error message mismatch\n"+
" want (tailscale): %q\n"+
" got (headscale): %q",
testID,
wantMsg,
errStr,
)
}