hscontrol/types: silence zerolog by default in tests

Tests were dumping megabytes of zerolog output on failure; silence
at init and let individual tests opt in via SetGlobalLevel when they need
log-driven assertions.

Updates #3157
This commit is contained in:
Kristoffer Dalby
2026-04-15 08:28:11 +00:00
parent affaa1a31d
commit 1059c678c4
7 changed files with 66 additions and 156 deletions

View File

@@ -9,7 +9,6 @@ import (
"testing"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog"
"zombiezen.com/go/postgrestest"
)
@@ -20,7 +19,6 @@ func newSQLiteTestDB() (*HSDatabase, error) {
}
log.Printf("database path: %s", tmpDir+"/headscale_test.db")
zerolog.SetGlobalLevel(zerolog.Disabled)
db, err := NewHeadscaleDatabase(
&types.Config{

View File

@@ -18,7 +18,6 @@ import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"tailscale.com/tailcfg"
)
@@ -182,9 +181,6 @@ func benchBatcher(nodeCount, bufferSize int) (*Batcher, map[types.NodeID]chan *t
// BenchmarkAddToBatch_Broadcast measures the cost of broadcasting a change
// to all nodes via addToBatch (no worker processing, just queuing).
func BenchmarkAddToBatch_Broadcast(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, 10)
@@ -213,9 +209,6 @@ func BenchmarkAddToBatch_Broadcast(b *testing.B) {
// BenchmarkAddToBatch_Targeted measures the cost of adding a targeted change
// to a single node.
func BenchmarkAddToBatch_Targeted(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, 10)
@@ -251,9 +244,6 @@ func BenchmarkAddToBatch_Targeted(b *testing.B) {
// BenchmarkAddToBatch_FullUpdate measures the cost of a FullUpdate broadcast.
func BenchmarkAddToBatch_FullUpdate(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, 10)
@@ -275,9 +265,6 @@ func BenchmarkAddToBatch_FullUpdate(b *testing.B) {
// BenchmarkProcessBatchedChanges measures the cost of moving pending changes
// to the work queue.
func BenchmarkProcessBatchedChanges(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dpending", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, 10)
@@ -311,9 +298,6 @@ func BenchmarkProcessBatchedChanges(b *testing.B) {
// BenchmarkBroadcastToN measures end-to-end broadcast: addToBatch + processBatchedChanges
// to N nodes. Does NOT include worker processing (MapResponse generation).
func BenchmarkBroadcastToN(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, 10)
@@ -339,9 +323,6 @@ func BenchmarkBroadcastToN(b *testing.B) {
// BenchmarkMultiChannelBroadcast measures the cost of sending a MapResponse
// to N nodes each with varying connection counts.
func BenchmarkMultiChannelBroadcast(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, b.N+1)
@@ -387,9 +368,6 @@ func BenchmarkMultiChannelBroadcast(b *testing.B) {
// BenchmarkConcurrentAddToBatch measures addToBatch throughput under
// concurrent access from multiple goroutines.
func BenchmarkConcurrentAddToBatch(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, 10)
@@ -440,9 +418,6 @@ func BenchmarkConcurrentAddToBatch(b *testing.B) {
// BenchmarkIsConnected measures the read throughput of IsConnected checks.
func BenchmarkIsConnected(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, _ := benchBatcher(nodeCount, 1)
@@ -464,9 +439,6 @@ func BenchmarkIsConnected(b *testing.B) {
// BenchmarkConnectedMap measures the cost of building the full connected map.
func BenchmarkConnectedMap(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, channels := benchBatcher(nodeCount, 1)
@@ -499,9 +471,6 @@ func BenchmarkConnectedMap(b *testing.B) {
// BenchmarkConnectionChurn measures the cost of add/remove connection cycling
// which simulates client reconnection patterns.
func BenchmarkConnectionChurn(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, channels := benchBatcher(nodeCount, 10)
@@ -545,9 +514,6 @@ func BenchmarkConnectionChurn(b *testing.B) {
// BenchmarkConcurrentSendAndChurn measures the combined cost of sends happening
// concurrently with connection churn - the hot path in production.
func BenchmarkConcurrentSendAndChurn(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
batcher, channels := benchBatcher(nodeCount, 100)
@@ -620,9 +586,6 @@ func BenchmarkAddNode(b *testing.B) {
b.Skip("skipping full pipeline benchmark in short mode")
}
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
testData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)
@@ -683,9 +646,6 @@ func BenchmarkFullPipeline(b *testing.B) {
b.Skip("skipping full pipeline benchmark in short mode")
}
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
testData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)
@@ -738,9 +698,6 @@ func BenchmarkMapResponseFromChange(b *testing.B) {
b.Skip("skipping full pipeline benchmark in short mode")
}
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 100} {
b.Run(fmt.Sprintf("%dnodes", nodeCount), func(b *testing.B) {
testData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)

View File

@@ -22,7 +22,6 @@ import (
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
@@ -473,9 +472,6 @@ func TestProcessBatchedChanges_BundlesChangesPerNode(t *testing.T) {
// could process bundles from tick N and tick N+1 concurrently for the same
// node, causing out-of-order delivery and races on lastSentPeers.
func TestWorkMu_PreventsInterTickRace(t *testing.T) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
mc := newMultiChannelNodeConn(1, nil)
ch := make(chan *tailcfg.MapResponse, 100)
entry := &connectionEntry{
@@ -849,9 +845,6 @@ func TestBug3_CleanupOfflineNodes_TOCTOU(t *testing.T) {
// BUG: batcher_lockfree.go worker() - no nil check after b.nodes.Load()
// FIX: Add nil guard: `exists && nc != nil` in both sync and async paths.
func TestBug5_WorkerPanicKillsWorkerPermanently(t *testing.T) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 3, 10)
defer lb.cleanup()
@@ -927,9 +920,6 @@ func TestBug5_WorkerPanicKillsWorkerPermanently(t *testing.T) {
// BUG: batcher_lockfree.go:163-166 - Start() has no "already started" check
// FIX: Add sync.Once or atomic.Bool to prevent multiple Start() calls.
func TestBug6_StartCalledMultipleTimes_GoroutineLeak(t *testing.T) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 3, 10)
lb.b.workers = 2
@@ -1042,9 +1032,6 @@ func TestBug7_CleanupOfflineNodes_PendingChangesCleanedStructurally(t *testing.T
// (timeouts happen here), then write-lock only to remove failed connections.
// The lock is now held only for O(N) pointer copies, not for N*50ms I/O.
func TestBug8_SerialTimeoutUnderWriteLock(t *testing.T) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
mc := newMultiChannelNodeConn(1, nil)
// Add 5 stale connections (unbuffered, no reader = will timeout at 50ms each)
@@ -1148,9 +1135,6 @@ func TestScale1000_AddToBatch_Broadcast(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 1000, 10)
defer lb.cleanup()
@@ -1181,9 +1165,6 @@ func TestScale1000_ProcessBatchedWithConcurrentAdd(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 1000, 10)
defer lb.cleanup()
@@ -1236,9 +1217,6 @@ func TestScale1000_MultiChannelBroadcast(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
const (
nodeCount = 1000
bufferSize = 5
@@ -1339,9 +1317,6 @@ func TestScale1000_ConnectionChurn(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 1000, 20)
defer lb.cleanup()
@@ -1441,9 +1416,6 @@ func TestScale1000_ConcurrentAddRemove(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 1000, 10)
defer lb.cleanup()
@@ -1485,9 +1457,6 @@ func TestScale1000_IsConnectedConsistency(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 1000, 10)
defer lb.cleanup()
@@ -1554,9 +1523,6 @@ func TestScale1000_BroadcastDuringNodeChurn(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 1000, 10)
defer lb.cleanup()
@@ -1641,9 +1607,6 @@ func TestScale1000_WorkChannelSaturation(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
// Create batcher with SMALL work channel to force saturation
b := &Batcher{
tick: time.NewTicker(10 * time.Millisecond),
@@ -1719,9 +1682,6 @@ func TestScale1000_FullUpdate_AllNodesGetPending(t *testing.T) {
t.Skip("skipping 1000-node test in short mode")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
lb := setupLightweightBatcher(t, 1000, 10)
defer lb.cleanup()
@@ -1757,9 +1717,6 @@ func TestScale1000_AllToAll_FullPipeline(t *testing.T) {
t.Skip("skipping 1000-node test with race detector (bcrypt setup too slow)")
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
t.Logf("setting up 1000-node test environment (this may take a minute)...")
testData, cleanup := setupBatcherWithTestData(t, NewBatcherAndMapper, 1, 1000, 200)

View File

@@ -21,7 +21,6 @@ import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/rs/zerolog"
"tailscale.com/tailcfg"
)
@@ -40,9 +39,6 @@ var (
// BenchmarkScale_IsConnected tests single-node lookup at increasing map sizes.
func BenchmarkScale_IsConnected(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsO1 {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, _ := benchBatcher(n, 1)
@@ -65,9 +61,6 @@ func BenchmarkScale_IsConnected(b *testing.B) {
// BenchmarkScale_AddToBatch_Targeted tests single-node targeted change at
// increasing map sizes. The map size should not affect per-operation cost.
func BenchmarkScale_AddToBatch_Targeted(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsO1 {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, _ := benchBatcher(n, 10)
@@ -104,9 +97,6 @@ func BenchmarkScale_AddToBatch_Targeted(b *testing.B) {
// BenchmarkScale_ConnectionChurn tests add/remove connection cycle.
// The map size should not affect per-operation cost for a single node.
func BenchmarkScale_ConnectionChurn(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsO1 {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, channels := benchBatcher(n, 10)
@@ -152,9 +142,6 @@ func BenchmarkScale_ConnectionChurn(b *testing.B) {
// BenchmarkScale_AddToBatch_Broadcast tests broadcasting a change to ALL nodes.
// Cost should scale linearly with node count.
func BenchmarkScale_AddToBatch_Broadcast(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsLinear {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, _ := benchBatcher(n, 10)
@@ -182,9 +169,6 @@ func BenchmarkScale_AddToBatch_Broadcast(b *testing.B) {
// BenchmarkScale_AddToBatch_FullUpdate tests FullUpdate broadcast cost.
func BenchmarkScale_AddToBatch_FullUpdate(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsLinear {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, _ := benchBatcher(n, 10)
@@ -205,9 +189,6 @@ func BenchmarkScale_AddToBatch_FullUpdate(b *testing.B) {
// BenchmarkScale_ProcessBatchedChanges tests draining pending changes into work queue.
func BenchmarkScale_ProcessBatchedChanges(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsLinear {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, _ := benchBatcher(n, 10)
@@ -238,9 +219,6 @@ func BenchmarkScale_ProcessBatchedChanges(b *testing.B) {
// BenchmarkScale_BroadcastToN tests end-to-end: addToBatch + processBatchedChanges.
func BenchmarkScale_BroadcastToN(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsLinear {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, _ := benchBatcher(n, 10)
@@ -267,9 +245,6 @@ func BenchmarkScale_BroadcastToN(b *testing.B) {
// This isolates the multiChannelNodeConn.send() cost.
// Uses large buffered channels to avoid goroutine drain overhead.
func BenchmarkScale_SendToAll(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsLinear {
b.Run(strconv.Itoa(n), func(b *testing.B) {
// b.N+1 buffer so sends never block
@@ -300,9 +275,6 @@ func BenchmarkScale_SendToAll(b *testing.B) {
// BenchmarkScale_ConnectedMap tests building the full connected/disconnected map.
func BenchmarkScale_ConnectedMap(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsHeavy {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, channels := benchBatcher(n, 1)
@@ -335,9 +307,6 @@ func BenchmarkScale_ConnectedMap(b *testing.B) {
// BenchmarkScale_ComputePeerDiff tests peer diff computation at scale.
// Each node tracks N-1 peers, with 10% removed.
func BenchmarkScale_ComputePeerDiff(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsHeavy {
b.Run(strconv.Itoa(n), func(b *testing.B) {
mc := newMultiChannelNodeConn(1, nil)
@@ -366,9 +335,6 @@ func BenchmarkScale_ComputePeerDiff(b *testing.B) {
// BenchmarkScale_UpdateSentPeers_Full tests full peer list update.
func BenchmarkScale_UpdateSentPeers_Full(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsHeavy {
b.Run(strconv.Itoa(n), func(b *testing.B) {
mc := newMultiChannelNodeConn(1, nil)
@@ -391,9 +357,6 @@ func BenchmarkScale_UpdateSentPeers_Full(b *testing.B) {
// BenchmarkScale_UpdateSentPeers_Incremental tests incremental peer updates (10% new).
func BenchmarkScale_UpdateSentPeers_Incremental(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsHeavy {
b.Run(strconv.Itoa(n), func(b *testing.B) {
mc := newMultiChannelNodeConn(1, nil)
@@ -428,9 +391,6 @@ func BenchmarkScale_UpdateSentPeers_Incremental(b *testing.B) {
// ~1.6 connections on average (every 3rd node has 3 connections).
// Uses large buffered channels to avoid goroutine drain overhead.
func BenchmarkScale_MultiChannelBroadcast(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsHeavy {
b.Run(strconv.Itoa(n), func(b *testing.B) {
// Use b.N+1 buffer so sends never block
@@ -480,9 +440,6 @@ func BenchmarkScale_MultiChannelBroadcast(b *testing.B) {
// BenchmarkScale_ConcurrentAddToBatch tests parallel addToBatch throughput.
func BenchmarkScale_ConcurrentAddToBatch(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsConc {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, _ := benchBatcher(n, 10)
@@ -529,9 +486,6 @@ func BenchmarkScale_ConcurrentAddToBatch(b *testing.B) {
// sending to all nodes while 10% of connections are churning concurrently.
// Uses large buffered channels to avoid goroutine drain overhead.
func BenchmarkScale_ConcurrentSendAndChurn(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsConc {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, channels := benchBatcher(n, b.N+1)
@@ -602,9 +556,6 @@ func BenchmarkScale_ConcurrentSendAndChurn(b *testing.B) {
// - 10% full updates (broadcast with full map)
// All while 10% of connections are churning.
func BenchmarkScale_MixedWorkload(b *testing.B) {
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, n := range scaleCountsConc {
b.Run(strconv.Itoa(n), func(b *testing.B) {
batcher, channels := benchBatcher(n, 10)
@@ -722,9 +673,6 @@ func BenchmarkScale_AddAllNodes(b *testing.B) {
b.Skip("skipping full pipeline benchmark in short mode")
}
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 50, 100, 200, 500} {
b.Run(strconv.Itoa(nodeCount), func(b *testing.B) {
testData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)
@@ -785,9 +733,6 @@ func BenchmarkScale_SingleAddNode(b *testing.B) {
b.Skip("skipping full pipeline benchmark in short mode")
}
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 50, 100, 200, 500, 1000} {
b.Run(strconv.Itoa(nodeCount), func(b *testing.B) {
testData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)
@@ -851,9 +796,6 @@ func BenchmarkScale_MapResponse_DERPMap(b *testing.B) {
b.Skip("skipping full pipeline benchmark in short mode")
}
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 50, 100, 200, 500} {
b.Run(strconv.Itoa(nodeCount), func(b *testing.B) {
testData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)
@@ -903,9 +845,6 @@ func BenchmarkScale_MapResponse_FullUpdate(b *testing.B) {
b.Skip("skipping full pipeline benchmark in short mode")
}
zerolog.SetGlobalLevel(zerolog.Disabled)
defer zerolog.SetGlobalLevel(zerolog.DebugLevel)
for _, nodeCount := range []int{10, 50, 100, 200, 500} {
b.Run(strconv.Itoa(nodeCount), func(b *testing.B) {
testData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)

View File

@@ -16,7 +16,6 @@ import (
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
@@ -574,12 +573,6 @@ func TestEnhancedTrackingWithBatcher(t *testing.T) {
// and ensure all nodes can see all other nodes. This is a critical test for mesh network
// functionality where every node must be able to communicate with every other node.
func TestBatcherScalabilityAllToAll(t *testing.T) {
// Reduce verbose application logging for cleaner test output
originalLevel := zerolog.GlobalLevel()
defer zerolog.SetGlobalLevel(originalLevel)
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
// Test cases: different node counts to stress test the all-to-all connectivity
testCases := []struct {
name string

View File

@@ -0,0 +1,48 @@
package types
import (
"os"
"testing"
"github.com/rs/zerolog"
)
// EnvTestLogLevel overrides the default test log level. Accepts any zerolog
// level string: trace, debug, info, warn, error, fatal, panic, disabled.
const EnvTestLogLevel = "HEADSCALE_TEST_LOG_LEVEL"
// init quiets zerolog when this package is loaded inside a test binary.
//
// hscontrol/types is transitively imported by every test in the repo that
// emits zerolog output, so this init() runs once per test binary and is
// the only place that needs to know about test logging configuration.
//
// Default: ErrorLevel (silent in green-path runs, real errors still surface).
// Override: HEADSCALE_TEST_LOG_LEVEL=debug (or trace, info, warn, disabled).
//
// Production binaries are unaffected because testing.Testing() returns false
// outside of test execution. The same testing.Testing() pattern is already
// used in hscontrol/db/users.go and hscontrol/db/node.go, so importing the
// testing package here is consistent with existing project conventions.
//
// Pitfalls:
// - log.Fatal still calls os.Exit and log.Panic still panics regardless of
// level — only the rendered message is suppressed.
// - Local buffer loggers (zerolog.New(&buf)) are also gated by the global
// level. Tests that assert on log output (currently only
// hscontrol/util/zlog) re-enable trace level via their own init_test.go.
func init() {
if !testing.Testing() {
return
}
if raw := os.Getenv(EnvTestLogLevel); raw != "" {
lvl, err := zerolog.ParseLevel(raw)
if err == nil {
zerolog.SetGlobalLevel(lvl)
return
}
}
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
}

View File

@@ -0,0 +1,18 @@
package zlog
import "github.com/rs/zerolog"
// init pins zerolog to TraceLevel for the zlog test binary.
//
// zlog's tests use zerolog.New(&buf) and assert on Info-level output. zerolog's
// (*Logger).should() gates emission on the global level, so any global level
// above Info would silently break the assertions.
//
// Today zlog does not transitively import hscontrol/types, so the test
// silencing init() in hscontrol/types/testlog.go does not run in this binary.
// This init defends against that changing in the future: if a future import
// chain pulls in hscontrol/types, this file will still ensure trace-level
// output is available for zlog's assertions.
func init() {
	// TraceLevel admits every event: zlog's tests assert on Info-level
	// output from buffer loggers, and zerolog gates emission on the global
	// level even for local loggers, so anything stricter would break them.
	zerolog.SetGlobalLevel(zerolog.TraceLevel)
}