Compare commits

..

178 Commits

Author SHA1 Message Date
Marc 'risson' Schmitt
04c066d8b0 lint
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 19:20:34 +01:00
Marc 'risson' Schmitt
f3341a4b83 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 18:46:47 +01:00
Marc 'risson' Schmitt
27f652dcf3 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 16:22:45 +01:00
Marc 'risson' Schmitt
dca2c2f536 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 16:10:43 +01:00
Marc 'risson' Schmitt
5d426411dd wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 15:59:31 +01:00
Marc 'risson' Schmitt
35ec2ea930 remove useless change
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 15:58:09 +01:00
Marc 'risson' Schmitt
b7c4d04c16 Merge remote-tracking branch 'origin/multiple-listeners' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 15:56:52 +01:00
Marc 'risson' Schmitt
8ef1b945e8 lint
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 15:47:44 +01:00
Marc 'risson' Schmitt
7fab5b6e93 Merge branch 'main' into multiple-listeners 2026-03-19 14:23:47 +00:00
Marc 'risson' Schmitt
7468a7271c Update website/docs/releases/2026/v2026.5.md
Co-authored-by: Tana M Berry <tanamarieberry@yahoo.com>
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 15:23:30 +01:00
Marc 'risson' Schmitt
1a270f9c6e Apply suggestions from code review
Co-authored-by: Tana M Berry <tanamarieberry@yahoo.com>
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 15:23:01 +01:00
Marc 'risson' Schmitt
3ae126cd99 Update website/docs/install-config/configuration/configuration.mdx
Co-authored-by: Tana M Berry <tanamarieberry@yahoo.com>
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 15:22:42 +01:00
Marc 'risson' Schmitt
6db2fbc8aa wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 14:38:19 +01:00
Marc 'risson' Schmitt
32f6738a40 errgroup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 14:33:42 +01:00
Marc 'risson' Schmitt
1ddc596362 better unix listener
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-19 14:22:41 +01:00
Marc 'risson' Schmitt
1281371077 Merge branch 'main' into rust-server 2026-03-18 15:29:43 +01:00
Marc 'risson' Schmitt
58508ebc4e Merge branch 'main' into rust-server 2026-03-18 15:29:25 +01:00
Marc 'risson' Schmitt
aa614ad31c lint
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-18 15:22:47 +01:00
Marc 'risson' Schmitt
b9b1c7ccf6 metrics socket and healthchecks for all outposts
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-18 15:17:33 +01:00
Marc 'risson' Schmitt
f8209680fa server healthcheck and unix socket
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-18 14:58:55 +01:00
Marc 'risson' Schmitt
2b2c6a3b9b Merge branch 'main' into multiple-listeners 2026-03-18 13:43:09 +01:00
Marc 'risson' Schmitt
62644a79fd root: allow listening on multiple IPs
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-16 18:40:45 +01:00
Marc 'risson' Schmitt
c426c94a25 Merge branch 'main' into rust-server 2026-03-16 14:09:37 +01:00
Marc 'risson' Schmitt
2e04738306 use waitgroups for multiple servers, TODO: fix healthchecks
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-13 16:13:56 +01:00
Marc 'risson' Schmitt
297e8db6eb use waitgroups for multiple servers
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-13 16:03:45 +01:00
Marc 'risson' Schmitt
5b9a30be4b Merge branch 'main' into rust-server 2026-03-13 15:59:30 +01:00
Marc 'risson' Schmitt
457429f261 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-12 14:45:48 +01:00
Marc 'risson' Schmitt
a0bac73c59 go ak api controller: add support for unix urls
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-12 14:44:15 +01:00
Marc 'risson' Schmitt
b82abaf230 fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-12 13:45:21 +01:00
Marc 'risson' Schmitt
c4b1e4bd44 http timeouts
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-12 13:41:50 +01:00
Marc 'risson' Schmitt
5592c4769a Merge branch 'main' into rust-server 2026-03-12 13:26:15 +01:00
Marc 'risson' Schmitt
f71f5b7278 fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 19:12:03 +01:00
Marc 'risson' Schmitt
d7159cfce2 fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 19:02:21 +01:00
Marc 'risson' Schmitt
30dc4e120b fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 19:01:15 +01:00
Marc 'risson' Schmitt
619023be75 fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 19:00:17 +01:00
Marc 'risson' Schmitt
de63473cd2 more ci
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 18:59:23 +01:00
Marc 'risson' Schmitt
6aa50b962c rustfmt in ci
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 18:50:01 +01:00
Marc 'risson' Schmitt
f240ca1708 more ci
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 18:48:21 +01:00
Marc 'risson' Schmitt
550da2005e start ci
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 18:46:28 +01:00
Marc 'risson' Schmitt
8818a0b06c revert
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 17:54:18 +01:00
Marc 'risson' Schmitt
013190ddd0 fix tests?
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 17:50:43 +01:00
Marc 'risson' Schmitt
6fb777ae5b make server listen on unix socket
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 17:45:55 +01:00
Marc 'risson' Schmitt
41f13d8805 fix sentry tracing
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 16:26:12 +01:00
Marc 'risson' Schmitt
fc5f0e7dc5 spellcheck
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 15:20:51 +01:00
Marc 'risson' Schmitt
9b9379ac8f lint
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 15:09:28 +01:00
Marc 'risson' Schmitt
c4b0825dad tasks/test: remove worker health check
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 15:06:39 +01:00
Marc 'risson' Schmitt
946ace14c1 fix makefile
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-11 15:05:50 +01:00
Marc 'risson' Schmitt
6a9eb8e9c7 Merge branch 'main' into rust-server 2026-03-11 14:28:31 +01:00
Marc 'risson' Schmitt
4f0d0e72d5 disable sentry span handling, it's broken
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 18:55:59 +01:00
Marc 'risson' Schmitt
411648672e config: separate initial loading and starting the reloader
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 18:47:51 +01:00
Marc 'risson' Schmitt
d5f6d30aeb update deps
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 17:26:24 +01:00
Marc 'risson' Schmitt
1508ad0ab8 subpath
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 17:10:56 +01:00
Marc 'risson' Schmitt
892e8fd856 config fallback values
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 17:04:58 +01:00
Marc 'risson' Schmitt
d4b0ac7c14 more todos
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 16:14:19 +01:00
Marc 'risson' Schmitt
fe4857abbb db: use connection callbacks for search path and application name
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 16:13:02 +01:00
Marc 'risson' Schmitt
8b73872c0d refine sentry setup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 16:00:19 +01:00
Marc 'risson' Schmitt
d22597377a finally finish tracing
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 15:40:34 +01:00
Marc 'risson' Schmitt
58d198d60a Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-10 13:31:55 +01:00
Marc 'risson' Schmitt
1de19546d7 tracing almost done
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-09 18:00:38 +01:00
Marc 'risson' Schmitt
8ad054ce65 Merge branch 'main' into rust-server 2026-03-09 16:22:12 +01:00
Marc 'risson' Schmitt
df95fc89eb wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-06 19:53:19 +01:00
Marc 'risson' Schmitt
75898710f1 update, some logging
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-06 15:30:46 +01:00
Marc 'risson' Schmitt
3a5a0c2e4f fmt
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-06 13:45:37 +01:00
Marc 'risson' Schmitt
b806e14a00 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-06 13:44:28 +01:00
Marc 'risson' Schmitt
c2d02cd807 fix deny
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-05 17:47:54 +01:00
Marc 'risson' Schmitt
1212402231 remove deprecated rustls-pemfile
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-05 17:43:43 +01:00
Marc 'risson' Schmitt
2927f414c5 extract tracelayer
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-05 17:33:37 +01:00
Marc 'risson' Schmitt
5ba18fbd55 fmt
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-05 13:46:35 +01:00
Marc 'risson' Schmitt
1b108e40d6 Merge branch 'main' into rust-server 2026-03-05 13:46:07 +01:00
Marc 'risson' Schmitt
982ae7b261 nit
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-05 13:35:24 +01:00
Marc 'risson' Schmitt
294a656ad2 worker status
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-05 13:34:35 +01:00
Marc 'risson' Schmitt
dab8bab916 better handling of socket path for future testing, healthcheck
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-04 16:54:57 +01:00
Marc 'risson' Schmitt
ee1803a0ae pedantic
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-03 18:33:13 +01:00
Marc 'risson' Schmitt
99c9894a04 extract brands
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-03 16:48:25 +01:00
Marc 'risson' Schmitt
2352ce72c9 Merge branch 'main' into rust-server 2026-03-03 14:03:27 +01:00
Marc 'risson' Schmitt
bb28e6425d small fixes
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-02 18:08:57 +01:00
Marc 'risson' Schmitt
f2149dfd90 write mode to authentik-mode file
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-02 15:40:30 +01:00
Marc 'risson' Schmitt
2ff0f09db1 spawn_blocking for tls computations
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-02 15:24:46 +01:00
Marc 'risson' Schmitt
40a91fd4fb fix minor stuff
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-02 15:08:55 +01:00
Marc 'risson' Schmitt
2e3f76441c Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-03-02 15:05:46 +01:00
Marc 'risson' Schmitt
f91474dd91 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-27 18:19:54 +01:00
Marc 'risson' Schmitt
61dbd5976f some todos
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-27 18:16:25 +01:00
Marc 'risson' Schmitt
8099ac6508 some cleanup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-27 18:15:50 +01:00
Marc 'risson' Schmitt
61ed26e3f6 worker started from rust
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-27 16:54:14 +01:00
Marc 'risson' Schmitt
ea17d4cbf1 finish tls
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-27 13:58:38 +01:00
Marc 'risson' Schmitt
ac388667d0 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-26 17:14:08 +01:00
Marc 'risson' Schmitt
cdc42de5b5 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-24 18:50:08 +01:00
Marc 'risson' Schmitt
2770c3a7e0 Merge branch 'main' into rust-server 2026-02-24 15:20:32 +01:00
Marc 'risson' Schmitt
f41f501702 finish metrics
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-24 15:20:09 +01:00
Marc 'risson' Schmitt
08685a574a better metrics
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-23 18:40:32 +01:00
Marc 'risson' Schmitt
15377f5154 better arbiter again
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-23 15:30:33 +01:00
Marc 'risson' Schmitt
52da505aab Merge branch 'main' into rust-server 2026-02-23 13:47:34 +01:00
Marc 'risson' Schmitt
d8a2a069aa add tests for extractors
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-20 18:38:09 +01:00
Marc 'risson' Schmitt
fec9dcc2e7 better logging
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-20 16:54:32 +01:00
Marc 'risson' Schmitt
b644fa5a2c revert unintended change
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-20 13:30:00 +01:00
Marc 'risson' Schmitt
9a5d59533e remove rust worker, fixup main
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-20 13:28:02 +01:00
Marc 'risson' Schmitt
3c64570398 use arcswap instead of lock for config
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-20 13:19:54 +01:00
Marc 'risson' Schmitt
a735f6dcf3 support proxying websockets to core
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-19 18:44:16 +01:00
Marc 'risson' Schmitt
f33e7f13eb autoreload
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-19 17:47:21 +01:00
Marc 'risson' Schmitt
eee00fa29b fix returned headers
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-19 16:45:42 +01:00
Marc 'risson' Schmitt
5a95a14a8f wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-19 16:38:21 +01:00
Marc 'risson' Schmitt
7b46fac608 fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-19 15:31:35 +01:00
Marc 'risson' Schmitt
bb488e1c2c some better lints
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-19 15:29:02 +01:00
Marc 'risson' Schmitt
138aa0e4e9 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-19 14:33:17 +01:00
Marc 'risson' Schmitt
e65cd2999f tls headers
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 19:36:53 +01:00
Marc 'risson' Schmitt
490790c272 cleanup forwarding
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 19:03:30 +01:00
Marc 'risson' Schmitt
b640b42dbb wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 18:16:42 +01:00
Marc 'risson' Schmitt
1371465ebe fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 18:13:48 +01:00
Marc 'risson' Schmitt
c623b96dc2 some more cleanup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 18:13:18 +01:00
Marc 'risson' Schmitt
43fe1918db cleanup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 17:38:59 +01:00
Marc 'risson' Schmitt
3e2489834d remove cargo.lock for docsmg
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 15:56:28 +01:00
Marc 'risson' Schmitt
7ba86b7de3 introduce the arbiter, treewide fmt
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 15:55:45 +01:00
Marc 'risson' Schmitt
85ef3cda04 bring docsmg up to standards
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 15:18:48 +01:00
Marc 'risson' Schmitt
62911536bf fix tests
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 14:14:29 +01:00
Marc 'risson' Schmitt
1a27971399 move everything to a single crate
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-18 14:13:51 +01:00
Marc 'risson' Schmitt
7a0e946bb5 start moving to a single crate
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-17 18:45:49 +01:00
Marc 'risson' Schmitt
428ccc2c14 fix metrics endpoint
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-17 18:27:17 +01:00
Marc 'risson' Schmitt
0b706d5830 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-17 13:44:03 +01:00
Marc 'risson' Schmitt
b9f4a1aed7 Merge branch 'main' into rust-server 2026-02-16 14:07:56 +01:00
Marc 'risson' Schmitt
d2cb45aadf start cleanup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-11 15:15:43 +01:00
Marc 'risson' Schmitt
de12748f25 Merge branch 'main' into rust-server 2026-02-11 14:41:18 +01:00
Marc 'risson' Schmitt
f8f39b8edc start on metrics
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-09 19:25:06 +01:00
Marc 'risson' Schmitt
986385a951 initialize python globally when needed
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-09 18:45:20 +01:00
Marc 'risson' Schmitt
129ed95cf0 Merge branch 'main' into rust-server 2026-02-09 18:39:57 +01:00
Marc 'risson' Schmitt
dc0d535fcc small improvements to storage token checks
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-09 18:39:40 +01:00
Marc 'risson' Schmitt
5c0e23a78f features for which process to build
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-09 18:07:39 +01:00
Marc 'risson' Schmitt
b4bf082864 Merge branch 'main' into rust-server 2026-02-09 14:37:11 +01:00
Dominic R
2f00983c29 static file handling 2026-02-08 11:41:30 -05:00
Dominic R
af93a1e230 rev tls_state to what it was before 2026-02-08 09:47:32 -05:00
Dominic R
dbb3898621 fix client error 2026-02-08 09:44:47 -05:00
Dominic R
a668ddcaf5 make it work on macos 2026-02-08 09:32:38 -05:00
Marc 'risson' Schmitt
051aea6f99 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-06 14:47:51 +01:00
Marc 'risson' Schmitt
b8104ec156 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-05 17:30:12 +01:00
Marc 'risson' Schmitt
e59970e6ab fmt
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-04 19:04:18 +01:00
Marc 'risson' Schmitt
0b50b0aa13 actually use the proxy protocol acceptor
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-04 19:03:45 +01:00
Marc 'risson' Schmitt
7b9b1c2c70 proxy protocol and tls extractors finally
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-04 18:55:41 +01:00
Marc 'risson' Schmitt
1e1cdffb33 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-04 14:29:20 +01:00
Marc 'risson' Schmitt
8ad572ba35 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-02 19:14:14 +01:00
Marc 'risson' Schmitt
8a5b8ad047 custom tls acceptor, wip proxy protocol acceptor
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-02 18:43:27 +01:00
Marc 'risson' Schmitt
907a4ce478 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-02-02 17:07:24 +01:00
Marc 'risson' Schmitt
a26254df02 compression and loading page
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-30 17:58:59 +01:00
Marc 'risson' Schmitt
bf9679dcb5 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-30 17:01:10 +01:00
Marc 'risson' Schmitt
71ee2f6c66 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-30 17:00:49 +01:00
Marc 'risson' Schmitt
90fb12a804 proxying works correctly now
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-30 16:09:41 +01:00
Marc 'risson' Schmitt
e271a8a0af static files
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-29 15:43:36 +01:00
Marc 'risson' Schmitt
6100fd7800 remove sea-orm
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-29 13:59:11 +01:00
Marc 'risson' Schmitt
b78d62f550 add console-subscriber for tokio-console debugging
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-28 15:56:14 +01:00
Marc 'risson' Schmitt
21eb1bb7d0 fix gunicorn shutdown detection
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-28 15:56:01 +01:00
Marc 'risson' Schmitt
e4445a44c4 db
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-28 15:22:41 +01:00
Marc 'risson' Schmitt
6fecbb41ca start on db
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-28 01:53:37 +01:00
Marc 'risson' Schmitt
4a840796bf Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-28 01:36:55 +01:00
Marc 'risson' Schmitt
cc7f190735 config reloading
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-28 01:33:25 +01:00
Marc 'risson' Schmitt
c4962f86dd wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-27 20:17:32 +01:00
Marc 'risson' Schmitt
ad672338e0 Merge branch 'main' into rust-server 2026-01-27 15:13:32 +01:00
Marc 'risson' Schmitt
fadf344955 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-26 14:25:40 +01:00
Marc 'risson' Schmitt
8c58873a3a wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-22 17:19:21 +01:00
Marc 'risson' Schmitt
ac7dd69be2 fixup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-21 17:57:24 +01:00
Marc 'risson' Schmitt
f01ab7ccb2 we proxying
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-21 17:53:56 +01:00
Marc 'risson' Schmitt
13f7ac6eca wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-20 17:57:09 +01:00
Marc 'risson' Schmitt
24202f9a3f wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-20 15:26:56 +01:00
Marc 'risson' Schmitt
5a72130576 cleanup
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-19 17:52:35 +01:00
Marc 'risson' Schmitt
fe5d24004e Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-19 16:30:12 +01:00
Marc 'risson' Schmitt
dd7c13c5bd wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-12 18:12:52 +01:00
Marc 'risson' Schmitt
32de1ab6c6 Merge branch 'main' into rust-server
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2026-01-12 14:07:32 +01:00
Marc 'risson' Schmitt
6e4384d672 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-17 14:26:40 +01:00
Marc 'risson' Schmitt
79f7759d4b Merge branch 'main' into rust-server 2025-11-14 14:48:26 +01:00
Marc 'risson' Schmitt
0ca41cb184 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-13 19:01:40 +01:00
Marc 'risson' Schmitt
f8e5c895d6 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-13 18:58:30 +01:00
Marc 'risson' Schmitt
2ba8991a3b wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-13 18:30:57 +01:00
Marc 'risson' Schmitt
19b36d2e0d wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-13 14:08:54 +01:00
Marc 'risson' Schmitt
fb802a53bc Merge branch 'main' into rust-server 2025-11-13 03:28:17 +01:00
Marc 'risson' Schmitt
2f6465d5a0 Merge branch 'main' into rust-server 2025-11-12 15:16:35 +01:00
Marc 'risson' Schmitt
c5437d2b0b wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-07 18:46:21 +01:00
Marc 'risson' Schmitt
8e2e90a87f wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-06 19:14:07 +01:00
Marc 'risson' Schmitt
4deb3d45cf wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-06 18:43:33 +01:00
Marc 'risson' Schmitt
b61bb3cc17 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-05 16:07:11 +01:00
Marc 'risson' Schmitt
af3332df9f wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-04 19:30:43 +01:00
Marc 'risson' Schmitt
0849df7478 wip
Signed-off-by: Marc 'risson' Schmitt <marc.schmitt@risson.space>
2025-11-04 17:53:54 +01:00
239 changed files with 12750 additions and 2891 deletions

.cargo/config.toml (Normal file, 2 lines changed)
View File

@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

View File

@@ -1,5 +1,17 @@
[licenses]
allow = ["Apache-2.0", "MIT", "MPL-2.0", "Unicode-3.0"]
allow = [
"Apache-2.0 WITH LLVM-exception",
"Apache-2.0",
"BSD-3-Clause",
"CC0-1.0",
"CDLA-Permissive-2.0",
"ISC",
"MIT",
"MPL-2.0",
"OpenSSL",
"Unicode-3.0",
"Zlib",
]
[licenses.private]
ignore = true

View File

@@ -115,13 +115,20 @@ runs:
shell: bash
env:
GITHUB_TOKEN: ${{ inputs.token }}
PR_NUMBER: ${{ steps.should_run.outputs.pr_number }}
REASON: ${{ steps.should_run.outputs.reason }}
run: |
set -e -o pipefail
PR_NUMBER="${{ steps.should_run.outputs.pr_number }}"
# Get PR details
PR_DATA=$(gh api repos/${{ github.repository }}/pulls/$PR_NUMBER)
PR_TITLE=$(echo "$PR_DATA" | jq -r '.title')
PR_AUTHOR=$(echo "$PR_DATA" | jq -r '.user.login')
echo "pr_title=$PR_TITLE" >> $GITHUB_OUTPUT
echo "pr_author=$PR_AUTHOR" >> $GITHUB_OUTPUT
# Determine which labels to process
if [ "${REASON}" = "label_added_to_merged_pr" ]; then
if [ "${{ steps.should_run.outputs.reason }}" = "label_added_to_merged_pr" ]; then
# Only process the specific label that was just added
if [ "${{ github.event_name }}" = "issues" ]; then
LABEL_NAME="${{ github.event.label.name }}"
@@ -145,13 +152,13 @@ runs:
shell: bash
env:
GITHUB_TOKEN: ${{ inputs.token }}
PR_NUMBER: '${{ steps.should_run.outputs.pr_number }}'
COMMIT_SHA: '${{ steps.should_run.outputs.merge_commit_sha }}'
PR_TITLE: ${{ github.event.pull_request.title }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
LABELS: '${{ steps.pr_details.outputs.labels }}'
run: |
set -e -o pipefail
PR_NUMBER='${{ steps.should_run.outputs.pr_number }}'
COMMIT_SHA='${{ steps.should_run.outputs.merge_commit_sha }}'
PR_TITLE='${{ steps.pr_details.outputs.pr_title }}'
PR_AUTHOR='${{ steps.pr_details.outputs.pr_author }}'
LABELS='${{ steps.pr_details.outputs.labels }}'
echo "Processing PR #$PR_NUMBER (reason: ${{ steps.should_run.outputs.reason }})"
echo "Found backport labels: $LABELS"

View File

@@ -36,19 +36,16 @@ runs:
run: uv sync --all-extras --dev --frozen
- name: Setup rust (stable)
if: ${{ contains(inputs.dependencies, 'rust') && !contains(inputs.dependencies, 'rust-nightly') }}
uses: actions-rust-lang/setup-rust-toolchain@150fca883cd4034361b621bd4e6a9d34e5143606 # v1
with:
rustflags: ""
uses: actions-rust-lang/setup-rust-toolchain@a0b538fa0b742a6aa35d6e2c169b4bd06d225a98 # v1
- name: Setup rust (nightly)
if: ${{ contains(inputs.dependencies, 'rust-nightly') }}
uses: actions-rust-lang/setup-rust-toolchain@150fca883cd4034361b621bd4e6a9d34e5143606 # v1
uses: actions-rust-lang/setup-rust-toolchain@a0b538fa0b742a6aa35d6e2c169b4bd06d225a98 # v1
with:
toolchain: nightly
components: rustfmt
rustflags: ""
- name: Setup rust dependencies
if: ${{ contains(inputs.dependencies, 'rust') }}
uses: taiki-e/install-action@06203676c62f0d3c765be3f2fcfbebbcb02d09f5 # v2
uses: taiki-e/install-action@64c5c20c872907b6f7cd50994ac189e7274160f2 # v2
with:
tool: cargo-deny cargo-machete cargo-llvm-cov nextest
- name: Setup node (web)

View File

@@ -21,7 +21,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v5
with:
token: ${{ steps.generate_token.outputs.token }}

View File

@@ -16,6 +16,7 @@ env:
POSTGRES_DB: authentik
POSTGRES_USER: authentik
POSTGRES_PASSWORD: "EK-5jnKfjrGRm<77"
RUSTFLAGS: "-Dwarnings"
permissions:
# Needed for checkout
@@ -143,6 +144,7 @@ jobs:
CI_TEST_SEED: ${{ needs.test-make-seed.outputs.seed }}
CI_RUN_ID: ${{ matrix.run_id }}
CI_TOTAL_RUNS: "5"
PROMETHEUS_MULTIPROC_DIR: /tmp
run: |
uv run make ci-test
- uses: ./.github/actions/test-results
@@ -172,6 +174,7 @@ jobs:
CI_TEST_SEED: ${{ needs.test-make-seed.outputs.seed }}
CI_RUN_ID: ${{ matrix.run_id }}
CI_TOTAL_RUNS: "5"
PROMETHEUS_MULTIPROC_DIR: /tmp
run: |
uv run make ci-test
- uses: ./.github/actions/test-results
@@ -188,6 +191,8 @@ jobs:
- name: Create k8s Kind Cluster
uses: helm/kind-action@ef37e7f390d99f746eb8b610417061a60e82a6cc # v1.14.0
- name: run integration
env:
PROMETHEUS_MULTIPROC_DIR: /tmp
run: |
uv run coverage run manage.py test tests/integration
uv run coverage xml
@@ -244,6 +249,8 @@ jobs:
npm run build
npm run build:sfe
- name: run e2e
env:
PROMETHEUS_MULTIPROC_DIR: /tmp
run: |
uv run coverage run manage.py test ${{ matrix.job.glob }}
uv run coverage xml
@@ -287,6 +294,8 @@ jobs:
npm run build
npm run build:sfe
- name: run conformance
env:
PROMETHEUS_MULTIPROC_DIR: /tmp
run: |
uv run coverage run manage.py test ${{ matrix.job.glob }}
uv run coverage xml

View File

@@ -32,7 +32,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v5
with:
token: ${{ steps.generate_token.outputs.token }}

View File

@@ -19,7 +19,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v5
with:
token: ${{ steps.generate_token.outputs.token }}

View File

@@ -14,7 +14,7 @@ jobs:
if: ${{ env.GH_APP_ID != '' }}
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
env:
GH_APP_ID: ${{ secrets.GH_APP_ID }}
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v5

View File

@@ -19,7 +19,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- name: Delete 'dev' containers older than a week
uses: snok/container-retention-policy@3b0972b2276b171b212f8c4efbca59ebba26eceb # v3.0.1
with:

View File

@@ -32,7 +32,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- name: Checkout main
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v5
with:
@@ -60,7 +60,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- name: Checkout main
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v5
with:

View File

@@ -70,7 +70,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- id: get-user-id
name: Get GitHub app user ID
run: echo "user-id=$(gh api "/users/${{ steps.app-token.outputs.app-slug }}[bot]" --jq .id)" >> "$GITHUB_OUTPUT"
@@ -118,7 +118,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
repositories: helm
- id: get-user-id
name: Get GitHub app user ID
@@ -160,7 +160,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
repositories: version
- id: get-user-id
name: Get GitHub app user ID

View File

@@ -18,7 +18,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10
with:
repo-token: ${{ steps.generate_token.outputs.token }}

View File

@@ -24,7 +24,7 @@ jobs:
uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v2
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIV_KEY }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v5
if: ${{ github.event_name != 'pull_request' }}
with:

View File

@@ -16,7 +16,7 @@ Cargo.toml @goauthentik/backend
Cargo.lock @goauthentik/backend
go.mod @goauthentik/backend
go.sum @goauthentik/backend
.cargo/ @goauthentik/backend
.config/ @goauthentik/backend
rust-toolchain.toml @goauthentik/backend
# Infrastructure
.github/ @goauthentik/infrastructure

Cargo.lock (generated, 4916 lines changed)

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
[workspace]
members = ["website/scripts/docsmg"]
members = [".", "website/scripts/docsmg"]
resolver = "3"
[workspace.package]
@@ -12,11 +12,101 @@ license-file = "LICENSE"
publish = false
[workspace.dependencies]
arc-swap = "1.8.2"
argh = "0.1.17"
async-trait = "0.1.89"
aws-lc-rs = { version = "1.16.1", features = ["fips"] }
axum = { version = "0.8.8", features = ["http2", "macros", "ws"] }
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"] }
bytes = "1.11.1"
chrono = "0.4.44"
clap = { version = "4.5.59", features = ["derive", "env"] }
client-ip = { version = "0.2.1", features = ["forwarded-header"] }
color-eyre = "0.6.5"
colored = "3.1.1"
config = { version = "0.15.19", default-features = false, features = [
"yaml",
"async",
] }
console-subscriber = "0.5.0"
dotenvy = "0.15.7"
durstr = "0.4.0"
eyre = "0.6.12"
forwarded-header-value = "0.1.1"
futures = "0.3.32"
glob = "0.3.3"
http-body-util = "0.1.3"
hyper = "1.8.1"
hyper-unix-socket = "0.3.0"
hyper-util = "0.1.20"
ipnet = { version = "2.12.0", features = ["serde"] }
# See https://github.com/mladedav/json-subscriber/pull/23
json-subscriber = { git = "https://github.com/rissson/json-subscriber.git", rev = "950ad7cb887a0a14fd5cb8afb8e76db1f456c032" }
jsonwebtoken = { version = "10.3.0", default-features = false, features = [
"aws_lc_rs",
] }
metrics = "0.24.3"
metrics-exporter-prometheus = { version = "0.18.1", default-features = false }
nix = { version = "0.31.2", features = ["hostname", "signal"] }
notify = "8.2.0"
pem = "3.0.6"
pin-project-lite = "0.2.17"
pyo3 = "0.28.2"
percent-encoding = "2.3.2"
rcgen = { version = "0.14.7", default-features = false, features = [
"aws_lc_rs",
"fips",
] }
regex = "1.12.3"
rustls = { version = "0.23.37", features = ["fips"] }
sentry = { version = "0.47.0", default-features = false, features = [
"backtrace",
"contexts",
"debug-images",
"panic",
"rustls",
"reqwest",
"tower",
"tracing",
] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
sqlx = { version = "0.8.6", default-features = false, features = [
"runtime-tokio",
"tls-rustls-aws-lc-rs",
"postgres",
"derive",
"macros",
"uuid",
"chrono",
"ipnet",
"json",
] }
time = "0.3.47"
thiserror = "2.0.18"
tokio = { version = "1.50.0", features = ["full"] }
tokio-rustls = "0.26.4"
tokio-tungstenite = "0.28.0"
tokio-util = "0.7.18"
tower = "0.5.3"
tower-http = { version = "0.6.8", features = [
"compression-br",
"compression-deflate",
"compression-gzip",
"compression-zstd",
"fs",
"timeout",
] }
tower-service = "0.3.3"
tracing = "0.1.44"
tracing-error = "0.2.1"
tracing-subscriber = { version = "0.3.22", features = [
"env-filter",
"json",
"tracing-log",
] }
url = "2.5.8"
uuid = { version = "1.22.0", features = ["v4"] }
[profile.dev.package.backtrace]
opt-level = 3
@@ -60,10 +150,14 @@ perf = { priority = -1, level = "warn" }
style = { priority = -1, level = "warn" }
suspicious = { priority = -1, level = "warn" }
### and disable the ones we don't want
### cargo group
multiple_crate_versions = "allow"
### pedantic group
redundant_closure_for_method_calls = "allow"
struct_field_names = "allow"
too_many_lines = "allow"
### nursery
missing_const_for_fn = "allow"
redundant_pub_crate = "allow"
option_if_let_else = "allow"
### restriction group
@@ -78,7 +172,6 @@ create_dir = "warn"
dbg_macro = "warn"
default_numeric_fallback = "warn"
disallowed_script_idents = "warn"
doc_paragraphs_missing_punctuation = "warn"
empty_drop = "warn"
empty_enum_variants_with_brackets = "warn"
empty_structs_with_brackets = "warn"
@@ -131,3 +224,73 @@ unused_trait_names = "warn"
unwrap_in_result = "warn"
unwrap_used = "warn"
verbose_file_reads = "warn"
[package]
name = "authentik"
version = "2026.5.0-rc1"
authors.workspace = true
edition.workspace = true
readme.workspace = true
homepage.workspace = true
repository.workspace = true
license-file.workspace = true
publish.workspace = true
[features]
default = ["core", "proxy"]
proxy = []
core = ["proxy", "dep:sqlx", "dep:pyo3"]
[dependencies]
arc-swap.workspace = true
argh.workspace = true
async-trait.workspace = true
aws-lc-rs.workspace = true
axum-server.workspace = true
axum.workspace = true
client-ip.workspace = true
color-eyre.workspace = true
config.workspace = true
console-subscriber.workspace = true
durstr.workspace = true
eyre.workspace = true
forwarded-header-value.workspace = true
futures.workspace = true
glob.workspace = true
http-body-util.workspace = true
hyper-unix-socket.workspace = true
hyper-util.workspace = true
hyper.workspace = true
ipnet.workspace = true
json-subscriber.workspace = true
jsonwebtoken.workspace = true
metrics.workspace = true
metrics-exporter-prometheus.workspace = true
nix.workspace = true
notify.workspace = true
pem.workspace = true
percent-encoding.workspace = true
pin-project-lite.workspace = true
pyo3 = { workspace = true, optional = true }
rcgen.workspace = true
rustls.workspace = true
sentry.workspace = true
serde.workspace = true
serde_json.workspace = true
sqlx = { workspace = true, optional = true }
thiserror.workspace = true
time.workspace = true
tokio-rustls.workspace = true
tokio-tungstenite.workspace = true
tokio-util.workspace = true
tokio.workspace = true
tower-http.workspace = true
tower.workspace = true
tracing-error.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
url.workspace = true
uuid.workspace = true
[lints]
workspace = true

View File

@@ -110,12 +110,24 @@ i18n-extract: core-i18n-extract web-i18n-extract ## Extract strings that requir
aws-cfn:
cd lifecycle/aws && npm i && $(UV) run npm run aws-cfn
run-server: ## Run the main authentik server process
run: ## Run the authentik server and worker, without auto reloading
$(UV) run ak allinone
run-watch: ## Run the authentik server and worker, with auto reloading
$(UV) run watchexec --on-busy-update=restart --stop-signal=SIGINT --exts py,rs --no-meta --notify -- ak allinone
run-server: ## Run the authentik server, without auto reloading
$(UV) run ak server
run-worker: ## Run the main authentik worker process
run-server-watch: ## Run the authentik server, with auto reloading
$(UV) run watchexec --on-busy-update=restart --stop-signal=SIGINT --exts py,rs --no-meta --notify -- ak server
run-worker: ## Run the authentik worker, without auto reloading
$(UV) run ak worker
run-worker-watch: ## Run the authentik worker, with auto reloading
$(UV) run watchexec --on-busy-update=restart --stop-signal=SIGINT --exts py,rs --no-meta --notify -- ak worker
core-i18n-extract:
$(UV) run ak makemessages \
--add-location file \
@@ -154,7 +166,7 @@ ifndef version
$(error Usage: make bump version=20xx.xx.xx )
endif
$(eval current_version := $(shell cat ${PWD}/internal/constants/VERSION))
$(SED_INPLACE) 's/^version = ".*"/version = "$(version)"/' ${PWD}/pyproject.toml
$(SED_INPLACE) 's/^version = ".*"/version = "$(version)"/' ${PWD}/pyproject.toml ${PWD}/Cargo.toml
$(SED_INPLACE) 's/^VERSION = ".*"/VERSION = "$(version)"/' ${PWD}/authentik/__init__.py
$(MAKE) gen-build gen-compose aws-cfn
$(SED_INPLACE) "s/\"${current_version}\"/\"$(version)\"/" ${PWD}/package.json ${PWD}/package-lock.json ${PWD}/web/package.json ${PWD}/web/package-lock.json
@@ -368,7 +380,7 @@ ci-lint-rustfmt: ci--meta-debug
$(CARGO) +nightly fmt --all --check -- --config-path .cargo/rustfmt.toml
ci-lint-clippy: ci--meta-debug
$(CARGO) clippy --workspace -- -D warnings
$(CARGO) clippy -- -D warnings
ci-test: ci--meta-debug
$(UV) run coverage run manage.py test --keepdb authentik

View File

@@ -92,6 +92,7 @@ class FileBackend(ManageableBackend):
"nbf": now() - timedelta(seconds=15),
},
key=sha256(f"{settings.SECRET_KEY}:{self.usage}".encode()).hexdigest(),
# Must match crates/authentik-server/src/static.rs
algorithm="HS256",
)
url = f"{prefix}/files/{path}?token={token}"

View File

@@ -1,7 +1,5 @@
"""Apply blueprint from commandline"""
from sys import exit as sys_exit
from django.core.management.base import BaseCommand, no_translations
from structlog.stdlib import get_logger
@@ -28,7 +26,7 @@ class Command(BaseCommand):
self.stderr.write("Blueprint invalid")
for log in logs:
self.stderr.write(f"\t{log.logger}: {log.event}: {log.attributes}")
sys_exit(1)
raise RuntimeError("Blueprint invalid")
importer.apply()
def add_arguments(self, parser):

View File

@@ -25,7 +25,6 @@ from authentik.core.api.providers import ProviderSerializer
from authentik.core.api.used_by import UsedByMixin
from authentik.core.api.users import UserSerializer
from authentik.core.api.utils import ModelSerializer, ThemedUrlsSerializer
from authentik.core.apps import AppAccessWithoutBindings
from authentik.core.models import Application, User
from authentik.events.logs import LogEventSerializer, capture_logs
from authentik.policies.api.exec import PolicyTestResultSerializer
@@ -164,7 +163,6 @@ class ApplicationViewSet(UsedByMixin, ModelViewSet):
request.user = user
for application in paginated_apps:
engine = PolicyEngine(application, request.user, request)
engine.empty_result = AppAccessWithoutBindings.get()
engine.build()
if engine.passing:
applications.append(application)
@@ -222,7 +220,6 @@ class ApplicationViewSet(UsedByMixin, ModelViewSet):
if not for_user:
raise ValidationError({"for_user": "User not found"})
engine = PolicyEngine(application, for_user, request)
engine.empty_result = AppAccessWithoutBindings.get()
engine.use_cache = False
with capture_logs() as logs:
engine.build()
@@ -242,6 +239,11 @@ class ApplicationViewSet(UsedByMixin, ModelViewSet):
@extend_schema(
parameters=[
OpenApiParameter(
name="superuser_full_list",
location=OpenApiParameter.QUERY,
type=OpenApiTypes.BOOL,
),
OpenApiParameter(
name="for_user",
location=OpenApiParameter.QUERY,
@@ -252,17 +254,18 @@ class ApplicationViewSet(UsedByMixin, ModelViewSet):
location=OpenApiParameter.QUERY,
type=OpenApiTypes.BOOL,
),
],
responses={
200: ApplicationSerializer(many=True),
},
operation_id="core_applications_accessible_list",
]
)
@action(methods=["GET"], detail=False, url_path="@accessible")
def accessible(self, request: Request) -> Response:
"""Get applications accessible for user"""
def list(self, request: Request) -> Response:
"""Custom list method that checks Policy based access instead of guardian"""
should_cache = request.query_params.get("search", "") == ""
superuser_full_list = (
str(request.query_params.get("superuser_full_list", "false")).lower() == "true"
)
if superuser_full_list and request.user.is_superuser:
return super().list(request)
only_with_launch_url = str(
request.query_params.get("only_with_launch_url", "false")
).lower()

View File

@@ -1,20 +1,7 @@
"""authentik core app config"""
from django.utils.translation import gettext_lazy as _
from authentik.blueprints.apps import ManagedAppConfig
from authentik.tasks.schedules.common import ScheduleSpec
from authentik.tenants.flags import Flag
class AppAccessWithoutBindings(Flag[bool], key="core_default_app_access"):
default = True
visibility = "none"
description = _(
"Configure if applications without any policy/group/user bindings "
"should be accessible to any user."
)
class AuthentikCoreConfig(ManagedAppConfig):

View File

@@ -24,8 +24,7 @@ from authentik.root.ws.consumer import build_device_group
# Arguments: user: User, password: str
password_changed = Signal()
# Arguments: credentials: dict[str, any], request: HttpRequest,
# stage: Stage, context: dict[str, any]
# Arguments: credentials: dict[str, any], request: HttpRequest, stage: Stage
login_failed = Signal()
LOGGER = get_logger()

View File

@@ -80,10 +80,10 @@ class TestApplicationsAPI(APITestCase):
self.assertEqual(body["passing"], False)
self.assertEqual(body["messages"], ["dummy"])
def test_list_accessible(self):
"""Test list operation without"""
def test_list(self):
"""Test list operation without superuser_full_list"""
self.client.force_login(self.user)
response = self.client.get(reverse("authentik_api:application-accessible"))
response = self.client.get(reverse("authentik_api:application-list"))
self.assertJSONEqual(
response.content.decode(),
{
@@ -136,10 +136,12 @@ class TestApplicationsAPI(APITestCase):
},
)
def test_list_rbac(self):
"""Test list operation"""
def test_list_superuser_full_list(self):
"""Test list operation with superuser_full_list"""
self.client.force_login(self.user)
response = self.client.get(reverse("authentik_api:application-list"))
response = self.client.get(
reverse("authentik_api:application-list") + "?superuser_full_list=true"
)
self.assertJSONEqual(
response.content.decode(),
{

View File

@@ -1,7 +1,6 @@
"""Enterprise app config"""
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from authentik.enterprise.apps import EnterpriseConfig
from authentik.tenants.flags import Flag
@@ -10,9 +9,6 @@ from authentik.tenants.flags import Flag
class AuditIncludeExpandedDiff(Flag[bool], key="enterprise_audit_include_expanded_diff"):
default = False
visibility = "none"
description = _(
"Include additional information in audit logs, may incur a performance penalty."
)
class AuthentikEnterpriseAuditConfig(EnterpriseConfig):

View File

@@ -3,7 +3,6 @@ from hmac import compare_digest
from django.http import Http404, HttpRequest, HttpResponse, HttpResponseBadRequest, QueryDict
from authentik.common.oauth.constants import QS_LOGIN_HINT
from authentik.endpoints.connectors.agent.auth import (
agent_auth_issue_token,
check_device_policies,
@@ -15,7 +14,7 @@ from authentik.enterprise.policy import EnterprisePolicyAccessView
from authentik.flows.exceptions import FlowNonApplicableException
from authentik.flows.models import in_memory_stage
from authentik.flows.planner import PLAN_CONTEXT_DEVICE, FlowPlanner
from authentik.flows.stage import PLAN_CONTEXT_PENDING_USER_IDENTIFIER, StageView
from authentik.flows.stage import StageView
from authentik.providers.oauth2.utils import HttpResponseRedirectScheme
QS_AGENT_IA_TOKEN = "ak-auth-ia-token" # nosec
@@ -65,14 +64,14 @@ class AgentInteractiveAuth(EnterprisePolicyAccessView):
planner = FlowPlanner(self.connector.authorization_flow)
planner.allow_empty_flows = True
context = {
PLAN_CONTEXT_DEVICE: self.device,
PLAN_CONTEXT_DEVICE_AUTH_TOKEN: self.auth_token,
}
if QS_LOGIN_HINT in request.GET:
context[PLAN_CONTEXT_PENDING_USER_IDENTIFIER] = request.GET[QS_LOGIN_HINT]
try:
plan = planner.plan(self.request, context)
plan = planner.plan(
self.request,
{
PLAN_CONTEXT_DEVICE: self.device,
PLAN_CONTEXT_DEVICE_AUTH_TOKEN: self.auth_token,
},
)
except FlowNonApplicableException:
return self.handle_no_permission_authenticated()
plan.append_stage(in_memory_stage(AgentAuthFulfillmentStage))
@@ -85,6 +84,7 @@ class AgentInteractiveAuth(EnterprisePolicyAccessView):
class AgentAuthFulfillmentStage(StageView):
def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
device: Device = self.executor.plan.context.pop(PLAN_CONTEXT_DEVICE)
auth_token: DeviceAuthenticationToken = self.executor.plan.context.pop(

View File

@@ -8,7 +8,6 @@ from dramatiq.actor import actor
from requests.exceptions import RequestException
from structlog.stdlib import get_logger
from authentik.core.apps import AppAccessWithoutBindings
from authentik.core.models import User
from authentik.enterprise.providers.ssf.models import (
DeliveryMethods,
@@ -69,7 +68,6 @@ def _check_app_access(stream: Stream, event_data: dict) -> bool:
if not user:
return True
engine = PolicyEngine(stream.provider.backchannel_application, user)
engine.empty_result = AppAccessWithoutBindings.get()
engine.use_cache = False
engine.build()
return engine.passing

View File

@@ -93,13 +93,11 @@ def on_login_failed(
credentials: dict[str, str],
request: HttpRequest,
stage: Stage | None = None,
context: dict[str, Any] | None = None,
**kwargs,
):
"""Failed Login, authentik custom event"""
user = User.objects.filter(username=credentials.get("username")).first()
context = context or {}
Event.new(EventAction.LOGIN_FAILED, **credentials, stage=stage, **context).from_http(
Event.new(EventAction.LOGIN_FAILED, **credentials, stage=stage, **kwargs).from_http(
request, user
)

View File

@@ -207,9 +207,3 @@ class TestEvents(TestCase):
"username": user.username,
},
)
def test_invalid_string(self):
"""Test creating an event with invalid unicode string data"""
event = Event.new("unittest", foo="foo bar \u0000 baz")
event.save()
self.assertEqual(event.context["foo"], "foo bar baz")

View File

@@ -36,10 +36,6 @@ ALLOWED_SPECIAL_KEYS = re.compile(
)
def cleanse_str(raw: Any) -> str:
return str(raw).replace("\u0000", "")
def cleanse_item(key: str, value: Any) -> Any:
"""Cleanse a single item"""
if isinstance(value, dict):
@@ -70,7 +66,7 @@ def cleanse_dict(source: dict[Any, Any]) -> dict[Any, Any]:
def model_to_dict(model: Model) -> dict[str, Any]:
"""Convert model to dict"""
name = cleanse_str(model)
name = str(model)
if hasattr(model, "name"):
name = model.name
return {
@@ -137,11 +133,11 @@ def sanitize_item(value: Any) -> Any: # noqa: PLR0911, PLR0912
if isinstance(value, ASN):
return ASN_CONTEXT_PROCESSOR.asn_to_dict(value)
if isinstance(value, Path):
return cleanse_str(value)
return str(value)
if isinstance(value, Exception):
return cleanse_str(value)
return str(value)
if isinstance(value, YAMLTag):
return cleanse_str(value)
return str(value)
if isinstance(value, Enum):
return value.value
if isinstance(value, type):
@@ -165,7 +161,7 @@ def sanitize_item(value: Any) -> Any: # noqa: PLR0911, PLR0912
raise ValueError("JSON can't represent timezone-aware times.")
return value.isoformat()
if isinstance(value, timedelta):
return cleanse_str(value.total_seconds())
return str(value.total_seconds())
if callable(value):
return {
"type": "callable",
@@ -178,8 +174,8 @@ def sanitize_item(value: Any) -> Any: # noqa: PLR0911, PLR0912
try:
return DjangoJSONEncoder().default(value)
except TypeError:
return cleanse_str(value)
return cleanse_str(value)
return str(value)
return str(value)
def sanitize_dict(source: dict[Any, Any]) -> dict[Any, Any]:

View File

@@ -1,6 +1,5 @@
"""authentik flows app config"""
from django.utils.translation import gettext_lazy as _
from prometheus_client import Gauge, Histogram
from authentik.blueprints.apps import ManagedAppConfig
@@ -28,14 +27,12 @@ class RefreshOtherFlowsAfterAuthentication(Flag[bool], key="flows_refresh_others
default = False
visibility = "public"
description = _("Refresh other tabs after successful authentication.")
class ContinuousLogin(Flag[bool], key="flows_continuous_login"):
default = False
visibility = "public"
description = _("Upon successful authentication, re-start authentication in other open tabs.")
class AuthentikFlowsConfig(ManagedAppConfig):

View File

@@ -342,10 +342,10 @@ def django_db_config(config: ConfigLoader | None = None) -> dict:
"default": {
"ENGINE": "psqlextra.backend",
"HOST": config.get("postgresql.host"),
"NAME": config.get("postgresql.name"),
"PORT": config.get("postgresql.port"),
"USER": config.get("postgresql.user"),
"PASSWORD": config.get("postgresql.password"),
"PORT": config.get("postgresql.port"),
"NAME": config.get("postgresql.name"),
"OPTIONS": {
"sslmode": config.get("postgresql.sslmode"),
"sslrootcert": config.get("postgresql.sslrootcert"),

View File

@@ -17,11 +17,12 @@
postgresql:
host: localhost
name: authentik
user: authentik
port: 5432
user: authentik
password: "env://POSTGRES_PASSWORD"
name: authentik
sslmode: disable
conn_max_age: 60
conn_health_checks: false
use_pool: False
test:
@@ -73,6 +74,19 @@ log_level: info
log:
http_headers:
- User-Agent
rust_log:
"console_subscriber": info
"h2": info
"hyper_util": warn
"mio": info
"notify": info
"reqwest": info
"runtime": info
"rustls": info
"sqlx": info
"sqlx_postgres": info
"tokio": info
"tungstenite": info
sessions:
unauthenticated_age: days=1

View File

@@ -41,7 +41,7 @@ def structlog_configure():
add_process_id,
add_tenant_information,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.TimeStamper(fmt="iso", utc=False),
structlog.processors.TimeStamper(fmt="iso", utc=True),
structlog.processors.StackInfoRenderer(),
structlog.processors.ExceptionRenderer(
structlog.tracebacks.ExceptionDictTransformer(show_locals=CONFIG.get_bool("debug"))

View File

@@ -1,4 +1,4 @@
"""authentik policies app config
"""Authentik policies app config
Every system policy should be its own Django app under the `policies` app.
For example: The 'dummy' policy is available at `authentik.policies.dummy`.
@@ -38,3 +38,4 @@ class AuthentikPoliciesConfig(ManagedAppConfig):
label = "authentik_policies"
verbose_name = "authentik Policies"
default = True
mountpoint = "policy/"

View File

@@ -0,0 +1,121 @@
{% extends 'login/base_full.html' %}
{% load static %}
{% load i18n %}
{% block head %}
{{ block.super }}
<script>
"use strict";
let redirecting = false;
async function checkAuth() {
if (redirecting) {
console.debug(
"authentik/policies/buffer: Already authenticating in another tab. This page will refresh once authentication is completed.",
);
return true;
}
const url = "{{ check_auth_url }}";
console.debug("authentik/policies/buffer: Checking authentication...");
return fetch(url, {
method: "HEAD",
})
.then((response) => {
if (response.status >= 400) {
return false;
}
console.debug("authentik/policies/buffer: Continuing");
if ("{{ auth_req_method }}" === "post") {
document.querySelector("form")?.submit();
return true;
}
window.location.assign("{{ continue_url|escapejs }}");
return true;
})
.catch((error) => {
console.warn("authentik/policies/buffer: Error checking authentication.", error);
return false;
})
}
const offset = 20;
let timeoutID = -1;
let timeout = 100;
let attempts = 0;
async function main() {
window.clearTimeout(timeoutID);
attempts += 1;
redirecting = await checkAuth();
console.debug(`authentik/policies/buffer: Waiting ${timeout}ms...`);
timeoutID = window.setTimeout(main, timeout);
timeout += offset * attempts;
if (timeout >= 2000) {
timeout = 2000;
}
}
document.addEventListener("visibilitychange", async () => {
if (document.hidden) return;
console.debug("authentik/policies/buffer: Checking authentication on tab activate...");
redirecting = await checkAuth();
});
main();
</script>
{% endblock %}
{% block title %}
{% trans 'Waiting for authentication...' %} - {{ brand.branding_title }}
{% endblock %}
{% block card_title %}
{% trans 'Waiting for authentication...' %}
{% endblock %}
{% block card %}
<form class="pf-c-form" method="{{ auth_req_method }}" action="{{ continue_url }}">
{% if auth_req_method == "post" %}
{% for key, value in auth_req_body.items %}
<input type="hidden" name="{{ key }}" value="{{ value }}" />
{% endfor %}
{% endif %}
<div class="pf-c-empty-state">
<div class="pf-c-empty-state__content">
<div class="pf-c-empty-state__icon">
<span class="pf-c-spinner pf-m-xl" role="progressbar">
<span class="pf-c-spinner__clipper"></span>
<span class="pf-c-spinner__lead-ball"></span>
<span class="pf-c-spinner__tail-ball"></span>
</span>
</div>
<h1 class="pf-c-title pf-m-lg">
{% trans "You're already authenticating in another tab. This page will refresh once authentication is completed." %}
</h1>
</div>
</div>
<div class="pf-c-form__group pf-m-action">
<a href="{{ auth_req_url }}" class="pf-c-button pf-m-primary pf-m-block">
{% trans "Authenticate in this tab" %}
</a>
</div>
</form>
{% endblock %}
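The polling loop in this template grows its delay by offset * attempts per tick and caps it at two seconds. A quick sketch (hypothetical helper, not part of the template) of the resulting schedule:

def poll_delays(n: int, start: int = 100, offset: int = 20, cap: int = 2000) -> list[int]:
    """First n delays of the buffer page's setTimeout loop, in milliseconds."""
    delays, timeout = [], start
    for attempt in range(1, n + 1):
        delays.append(timeout)
        timeout = min(timeout + offset * attempt, cap)
    return delays

print(poll_delays(8))  # [100, 120, 160, 220, 300, 400, 520, 660]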


@@ -1,7 +1,14 @@
"""API URLs"""
from django.urls import path
from authentik.policies.api.bindings import PolicyBindingViewSet
from authentik.policies.api.policies import PolicyViewSet
from authentik.policies.views import BufferView
urlpatterns = [
path("buffer", BufferView.as_view(), name="buffer"),
]
api_urlpatterns = [
("policies/all", PolicyViewSet),


@@ -4,12 +4,13 @@ from typing import Any
from django.contrib import messages
from django.contrib.auth.mixins import AccessMixin
from django.http import Http404, HttpRequest, HttpResponse
from django.http import Http404, HttpRequest, HttpResponse, QueryDict
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import gettext as _
from django.views.generic.base import View
from django.views.generic.base import TemplateView, View
from structlog.stdlib import get_logger
from authentik.core.apps import AppAccessWithoutBindings
from authentik.core.models import Application, Provider, User
from authentik.flows.exceptions import EmptyFlowException, FlowNonApplicableException
from authentik.flows.models import Flow, FlowDesignation
@@ -29,6 +30,9 @@ from authentik.policies.models import PolicyBindingModel
from authentik.policies.types import PolicyRequest, PolicyResult
LOGGER = get_logger()
QS_BUFFER_ID = "af_bf_id"
QS_SKIP_BUFFER = "skip_buffer"
SESSION_KEY_BUFFER = "authentik/policies/pav_buffer/%s"
class RequestValidationError(SentryIgnoredException):
@@ -42,6 +46,12 @@ class RequestValidationError(SentryIgnoredException):
self.response = response
class BaseMixin:
"""Base Mixin class, used to annotate View Member variables"""
request: HttpRequest
class PolicyAccessView(AccessMixin, View):
"""Mixin class for usage in Authorization views.
Provides functions to check application access, etc."""
@@ -136,7 +146,6 @@ class PolicyAccessView(AccessMixin, View):
policy_engine = PolicyEngine(
pbm or self.application, user or self.request.user, self.request
)
policy_engine.empty_result = AppAccessWithoutBindings.get()
policy_engine.use_cache = False
policy_engine.request = self.modify_policy_request(policy_engine.request)
policy_engine.build()
@@ -153,3 +162,30 @@ class PolicyAccessView(AccessMixin, View):
for message in result.messages:
messages.error(self.request, _(message))
return result
def url_with_qs(url: str, **kwargs):
"""Update/set querystring of `url` with the parameters in `kwargs`. Original query string
parameters are retained"""
if "?" not in url:
return url + f"?{urlencode(kwargs)}"
url, _, qs = url.partition("?")
qs = QueryDict(qs, mutable=True)
qs.update(kwargs)
return url + f"?{urlencode(qs.items())}"
class BufferView(TemplateView):
"""Buffer view"""
template_name = "policies/buffer.html"
def get_context_data(self, **kwargs):
buf_id = self.request.GET.get(QS_BUFFER_ID)
buffer: dict = self.request.session.get(SESSION_KEY_BUFFER % buf_id)
kwargs["auth_req_method"] = buffer["method"]
kwargs["auth_req_body"] = buffer["body"]
kwargs["auth_req_url"] = url_with_qs(buffer["url"], **{QS_SKIP_BUFFER: True})
kwargs["check_auth_url"] = reverse("authentik_api:user-me")
kwargs["continue_url"] = url_with_qs(buffer["url"], **{QS_BUFFER_ID: buf_id})
return super().get_context_data(**kwargs)
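url_with_qs merges new parameters into a URL while keeping the existing query string. A standard-library approximation (the view above uses Django's QueryDict; this sketch assumes single-valued parameters):

from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

def url_with_qs(url: str, **kwargs) -> str:
    # Merge kwargs into the existing query string, overriding duplicate keys
    parts = urlparse(url)
    query = dict(parse_qsl(parts.query))
    query.update({k: str(v) for k, v in kwargs.items()})
    return urlunparse(parts._replace(query=urlencode(query)))

print(url_with_qs("/app/launch?af_bf_id=abc", skip_buffer=True))
# /app/launch?af_bf_id=abc&skip_buffer=True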


@@ -17,7 +17,6 @@ from rest_framework.viewsets import GenericViewSet, ModelViewSet
from authentik.core.api.providers import ProviderSerializer
from authentik.core.api.used_by import UsedByMixin
from authentik.core.api.utils import ModelSerializer, PassiveSerializer
from authentik.core.apps import AppAccessWithoutBindings
from authentik.core.models import Application
from authentik.policies.api.exec import PolicyTestResultSerializer
from authentik.policies.engine import PolicyEngine
@@ -154,7 +153,6 @@ class LDAPOutpostConfigViewSet(ListModelMixin, GenericViewSet):
provider = get_object_or_404(LDAPProvider, pk=pk)
application = get_object_or_404(Application, slug=request.query_params["app_slug"])
engine = PolicyEngine(application, request.user, request)
engine.empty_result = AppAccessWithoutBindings.get()
engine.use_cache = False
engine.build()
result = engine.result


@@ -33,7 +33,6 @@ from authentik.common.oauth.constants import (
SCOPE_OFFLINE_ACCESS,
TOKEN_TYPE,
)
from authentik.core.apps import AppAccessWithoutBindings
from authentik.core.middleware import CTX_AUTH_VIA
from authentik.core.models import (
USER_ATTRIBUTE_EXPIRES,
@@ -148,7 +147,6 @@ class TokenParams:
):
user = self.user if self.user else get_anonymous_user()
engine = PolicyEngine(app, user, request)
engine.empty_result = AppAccessWithoutBindings.get()
# Don't cache as for client_credentials flows the user will not be set
# so we'll get generic cache results
engine.use_cache = False


@@ -18,7 +18,6 @@ from rest_framework.viewsets import GenericViewSet, ModelViewSet
from authentik.core.api.providers import ProviderSerializer
from authentik.core.api.used_by import UsedByMixin
from authentik.core.api.utils import ModelSerializer, PassiveSerializer
from authentik.core.apps import AppAccessWithoutBindings
from authentik.core.expression.exceptions import PropertyMappingExpressionException
from authentik.core.models import Application
from authentik.events.models import Event, EventAction
@@ -170,7 +169,6 @@ class RadiusOutpostConfigViewSet(ListModelMixin, GenericViewSet):
provider = get_object_or_404(RadiusProvider, pk=pk)
application = get_object_or_404(Application, slug=request.query_params["app_slug"])
engine = PolicyEngine(application, request.user, request)
engine.empty_result = AppAccessWithoutBindings.get()
engine.use_cache = False
engine.build()
result = engine.result


@@ -12,7 +12,6 @@ from requests.auth import AuthBase
from rest_framework.serializers import Serializer
from structlog.stdlib import get_logger
from authentik.core.apps import AppAccessWithoutBindings
from authentik.core.models import BackchannelProvider, Group, PropertyMapping, User, UserTypes
from authentik.lib.models import InternallyManagedMixin, SerializerModel
from authentik.lib.sync.outgoing.base import BaseOutgoingSyncClient
@@ -194,14 +193,13 @@ class SCIMProvider(OutgoingSyncProvider, BackchannelProvider):
# Filter users by their access to the backchannel application if an application is set
# This handles both policy bindings and group_filters
if self.backchannel_application:
pks = []
for user in base:
engine = PolicyEngine(self.backchannel_application, user, None)
engine.empty_result = AppAccessWithoutBindings.get()
engine.build()
if engine.passing:
pks.append(user.pk)
base = base.filter(pk__in=pks)
base = base.filter(
pk__in=[
user.pk
for user in base
if PolicyEngine(self.backchannel_application, user, None).build().passing
]
)
return base.order_by("pk")
if type == Group:


@@ -339,6 +339,9 @@ class LoggingMiddleware:
def log(self, request: HttpRequest, status_code: int, runtime: int, **kwargs):
"""Log request"""
# Those are logged by the server above
if request.path in ("/-/metrics/", "/-/health/ready/"):
return
for header in self.headers_to_log:
header_value = request.headers.get(header)
if not header_value:


@@ -5,18 +5,17 @@ from django.db.utils import OperationalError
from django.dispatch import Signal
from django.http import HttpRequest, HttpResponse
from django.views import View
from django_prometheus.exports import ExportToDjangoView
monitoring_set = Signal()
class MetricsView(View):
"""Wrapper around ExportToDjangoView with authentication, accessed by the authentik router"""
"""View for metrics monitoring_set signal, accessed by the authentik router"""
def get(self, request: HttpRequest) -> HttpResponse:
"""Check for HTTP-Basic auth"""
monitoring_set.send_robust(self)
return ExportToDjangoView(request)
return HttpResponse(status=204)
class LiveView(View):
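With Prometheus export moved out of the Django view, MetricsView now only fans out the monitoring_set signal and answers 204. A minimal standalone sketch of the send_robust pattern it relies on (illustrative receiver, not authentik code):

from django.dispatch import Signal

monitoring_set = Signal()

def refresh_gauges(sender, **kwargs):
    pass  # receivers refresh their Prometheus gauges here

monitoring_set.connect(refresh_gauges)
# send_robust catches per-receiver exceptions and returns (receiver, result)
# pairs, so one broken receiver cannot break the whole metrics pass
results = monitoring_set.send_robust(sender=None)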


@@ -440,8 +440,6 @@ DRAMATIQ = {
("authentik.tasks.middleware.TaskLogMiddleware", {}),
("authentik.tasks.middleware.LoggingMiddleware", {}),
("authentik.tasks.middleware.DescriptionMiddleware", {}),
("authentik.tasks.middleware.WorkerHealthcheckMiddleware", {}),
("authentik.tasks.middleware.WorkerStatusMiddleware", {}),
(
"authentik.tasks.middleware.MetricsMiddleware",
{


@@ -1,5 +1,9 @@
"""root tests"""
from pathlib import Path
from secrets import token_urlsafe
from tempfile import gettempdir
from django.test import TransactionTestCase
from django.urls import reverse
@@ -7,9 +11,26 @@ from django.urls import reverse
class TestRoot(TransactionTestCase):
"""Test root application"""
def test_monitoring(self):
def setUp(self):
_tmp = Path(gettempdir())
self.token = token_urlsafe(32)
with open(_tmp / "authentik-metrics-gunicorn.key", "w") as _f:
_f.write(self.token)
def tearDown(self):
_tmp = Path(gettempdir())
(_tmp / "authentik-metrics-gunicorn.key").unlink()
def test_monitoring_error(self):
"""Test monitoring without any credentials"""
response = self.client.get(reverse("metrics"))
self.assertEqual(response.status_code, 401)
def test_monitoring_ok(self):
"""Test monitoring with credentials"""
self.assertEqual(self.client.get(reverse("metrics")).status_code, 200)
auth_headers = {"HTTP_AUTHORIZATION": f"Bearer {self.token}"}
response = self.client.get(reverse("metrics"), **auth_headers)
self.assertEqual(response.status_code, 200)
def test_monitoring_live(self):
"""Test LiveView"""


@@ -60,7 +60,11 @@ class LDAPSourceSerializer(SourceSerializer):
sources = sources.exclude(pk=self.instance.pk)
if sources.exists():
raise ValidationError(
_("Only a single LDAP Source with password synchronization is allowed")
{
"sync_users_password": _(
"Only a single LDAP Source with password synchronization is allowed"
)
}
)
return sync_users_password


@@ -1,9 +1,8 @@
"""LDAP Source API tests"""
from rest_framework.exceptions import ErrorDetail
from rest_framework.test import APITestCase
from authentik.lib.generators import generate_id, generate_key
from authentik.lib.generators import generate_key
from authentik.sources.ldap.api import LDAPSourceSerializer
from authentik.sources.ldap.models import LDAPSource
@@ -27,13 +26,12 @@ class LDAPAPITests(APITestCase):
}
)
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.errors, {})
def test_sync_users_password_invalid(self):
"""Ensure only a single source with password sync can be created"""
LDAPSource.objects.create(
name="foo",
slug=generate_id(),
slug="foo",
server_uri="ldaps://1.2.3.4",
bind_cn="",
bind_password=LDAP_PASSWORD,
@@ -43,26 +41,15 @@ class LDAPAPITests(APITestCase):
serializer = LDAPSourceSerializer(
data={
"name": "foo",
"slug": generate_id(),
"slug": " foo",
"server_uri": "ldaps://1.2.3.4",
"bind_cn": "",
"bind_password": LDAP_PASSWORD,
"base_dn": "dc=foo",
"sync_users_password": True,
"sync_users_password": False,
}
)
self.assertFalse(serializer.is_valid())
self.assertEqual(
serializer.errors,
{
"sync_users_password": [
ErrorDetail(
string="Only a single LDAP Source with password synchronization is allowed",
code="invalid",
)
]
},
)
def test_sync_users_mapping_empty(self):
"""Check that when sync_users is enabled, property mappings must be set"""


@@ -38,7 +38,6 @@ from authentik.stages.authenticator_validate.models import AuthenticatorValidate
from authentik.stages.authenticator_webauthn.models import UserVerification, WebAuthnDevice
from authentik.stages.authenticator_webauthn.stage import PLAN_CONTEXT_WEBAUTHN_CHALLENGE
from authentik.stages.authenticator_webauthn.utils import get_origin, get_rp_id
from authentik.stages.password.stage import PLAN_CONTEXT_METHOD_ARGS
LOGGER = get_logger()
if TYPE_CHECKING:
@@ -150,11 +149,7 @@ def validate_challenge_code(code: str, stage_view: StageView, user: User) -> Dev
credentials={"username": user.username},
request=stage_view.request,
stage=stage_view.executor.current_stage,
context={
PLAN_CONTEXT_METHOD_ARGS: {
"device_class": DeviceClasses.TOTP.value,
}
},
device_class=DeviceClasses.TOTP.value,
)
raise ValidationError(
_("Invalid Token. Please ensure the time on your device is accurate and try again.")
@@ -226,13 +221,9 @@ def validate_challenge_webauthn(
credentials={"username": user.username},
request=stage_view.request,
stage=stage_view.executor.current_stage,
context={
PLAN_CONTEXT_METHOD_ARGS: {
"device": device,
"device_class": DeviceClasses.WEBAUTHN.value,
"device_type": device.device_type,
},
},
device=device,
device_class=DeviceClasses.WEBAUTHN.value,
device_type=device.device_type,
)
raise ValidationError("Assertion failed") from exc
@@ -282,12 +273,8 @@ def validate_challenge_duo(device_pk: int, stage_view: StageView, user: User) ->
credentials={"username": user.username},
request=stage_view.request,
stage=stage_view.executor.current_stage,
context={
PLAN_CONTEXT_METHOD_ARGS: {
"device_class": DeviceClasses.DUO.value,
"duo_response": response,
}
},
device_class=DeviceClasses.DUO.value,
duo_response=response,
)
raise ValidationError("Duo denied access", code="denied")
return device


@@ -1,5 +1,6 @@
import pglock
from django.utils.timezone import now, timedelta
from datetime import timedelta
from django.utils.timezone import now
from drf_spectacular.utils import extend_schema, inline_serializer
from packaging.version import parse
from rest_framework.fields import BooleanField, CharField
@@ -31,18 +32,13 @@ class WorkerView(APIView):
def get(self, request: Request) -> Response:
response = []
our_version = parse(authentik_full_version())
for status in WorkerStatus.objects.filter(last_seen__gt=now() - timedelta(minutes=2)):
lock_id = f"goauthentik.io/worker/status/{status.pk}"
with pglock.advisory(lock_id, timeout=0, side_effect=pglock.Return) as acquired:
# If we could acquire the lock, the worker doesn't hold it, so it isn't running
if acquired:
continue
version_matching = parse(status.version) == our_version
response.append(
{
"worker_id": f"{status.pk}@{status.hostname}",
"version": status.version,
"version_matching": version_matching,
}
)
for status in WorkerStatus.objects.filter(last_seen__gt=now() - timedelta(seconds=45)):
version_matching = parse(status.version) == our_version
response.append(
{
"worker_id": f"{status.pk}@{status.hostname}",
"version": status.version,
"version_matching": version_matching,
}
)
return Response(response)
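The rewrite drops the advisory-lock probe entirely: a worker now counts as alive if its status row is fresh within a 45-second window (assuming workers refresh their row more often than that). The check reduces to:

from datetime import datetime, timedelta, timezone

def is_alive(last_seen: datetime, window: timedelta = timedelta(seconds=45)) -> bool:
    # Rows older than the window belong to dead or stalled workers
    return last_seen > datetime.now(timezone.utc) - window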


@@ -1,42 +1,23 @@
import socket
from collections.abc import Callable
from http.server import BaseHTTPRequestHandler
from threading import Event as TEvent
from threading import Thread, current_thread
from typing import Any, cast
import pglock
from django.db import OperationalError, connections, transaction
from django.utils.timezone import now
from django.db import OperationalError
from django_dramatiq_postgres.middleware import (
CurrentTask as BaseCurrentTask,
)
from django_dramatiq_postgres.middleware import (
HTTPServer,
HTTPServerThread,
)
from django_dramatiq_postgres.middleware import (
MetricsMiddleware as BaseMetricsMiddleware,
)
from django_dramatiq_postgres.middleware import (
_MetricsHandler as BaseMetricsHandler,
)
from dramatiq import Worker
from dramatiq.broker import Broker
from dramatiq.message import Message
from dramatiq.middleware import Middleware
from psycopg.errors import Error
from setproctitle import setthreadtitle
from structlog.stdlib import get_logger
from authentik import authentik_full_version
from authentik.events.models import Event, EventAction
from authentik.lib.config import CONFIG
from authentik.lib.sentry import should_ignore_exception
from authentik.lib.utils.reflection import class_to_path
from authentik.root.monitoring import monitoring_set
from authentik.root.signals import post_startup, pre_startup, startup
from authentik.tasks.models import Task, TaskLog, TaskStatus, WorkerStatus
from authentik.tasks.models import Task, TaskLog, TaskStatus
from authentik.tenants.models import Tenant
from authentik.tenants.utils import get_current_tenant
@@ -193,154 +174,15 @@ class DescriptionMiddleware(Middleware):
return {"description"}
class _healthcheck_handler(BaseHTTPRequestHandler):
def log_request(self, code="-", size="-"):
HEALTHCHECK_LOGGER.info(
self.path,
method=self.command,
status=code,
)
def log_error(self, format, *args):
HEALTHCHECK_LOGGER.warning(format, *args)
def do_HEAD(self):
try:
for db_conn in connections.all():
# Force connection reload
db_conn.connect()
_ = db_conn.cursor()
self.send_response(200)
except DB_ERRORS: # pragma: no cover
self.send_response(503)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.send_header("Content-Length", "0")
self.end_headers()
do_GET = do_HEAD
class WorkerHealthcheckMiddleware(Middleware):
thread: HTTPServerThread | None
def __init__(self):
listen = CONFIG.get("listen.http", ["[::]:9000"])
if isinstance(listen, str):
listen = listen.split(",")
host, _, port = listen[0].rpartition(":")
try:
port = int(port)
except ValueError:
LOGGER.error(f"Invalid port entered: {port}")
self.host, self.port = host, port
def after_worker_boot(self, broker: Broker, worker: Worker):
self.thread = HTTPServerThread(
target=WorkerHealthcheckMiddleware.run, args=(self.host, self.port)
)
self.thread.start()
def before_worker_shutdown(self, broker: Broker, worker: Worker):
server = self.thread.server
if server:
server.shutdown()
LOGGER.debug("Stopping WorkerHealthcheckMiddleware")
self.thread.join()
@staticmethod
def run(addr: str, port: int):
setthreadtitle("authentik Worker Healthcheck server")
try:
server = HTTPServer((addr, port), _healthcheck_handler)
thread = cast(HTTPServerThread, current_thread())
thread.server = server
server.serve_forever()
except OSError as exc:
get_logger(__name__, type(WorkerHealthcheckMiddleware)).warning(
"Port is already in use, not starting healthcheck server",
exc=exc,
)
class WorkerStatusMiddleware(Middleware):
thread: Thread | None
thread_event: TEvent | None
def after_worker_boot(self, broker: Broker, worker: Worker):
self.thread_event = TEvent()
self.thread = Thread(target=WorkerStatusMiddleware.run, args=(self.thread_event,))
self.thread.start()
def before_worker_shutdown(self, broker: Broker, worker: Worker):
self.thread_event.set()
LOGGER.debug("Stopping WorkerStatusMiddleware")
self.thread.join()
@staticmethod
def run(event: TEvent):
setthreadtitle("authentik Worker status")
with transaction.atomic():
hostname = socket.gethostname()
WorkerStatus.objects.filter(hostname=hostname).delete()
status, _ = WorkerStatus.objects.update_or_create(
hostname=hostname,
version=authentik_full_version(),
)
while not event.is_set():
try:
WorkerStatusMiddleware.keep(event, status)
except DB_ERRORS: # pragma: no cover
event.wait(10)
try:
connections.close_all()
except DB_ERRORS:
pass
@staticmethod
def keep(event: TEvent, status: WorkerStatus):
lock_id = f"goauthentik.io/worker/status/{status.pk}"
with pglock.advisory(lock_id, side_effect=pglock.Raise):
while not event.is_set():
status.refresh_from_db()
old_last_seen = status.last_seen
status.last_seen = now()
if old_last_seen != status.last_seen:
status.save(update_fields=("last_seen",))
event.wait(30)
class _MetricsHandler(BaseMetricsHandler):
def do_GET(self) -> None:
monitoring_set.send_robust(self)
return super().do_GET()
class MetricsMiddleware(BaseMetricsMiddleware):
thread: HTTPServerThread | None
handler_class = _MetricsHandler
@property
def forks(self) -> list[Callable[[], None]]:
def forks(self):
return []
def after_worker_boot(self, broker: Broker, worker: Worker):
listen = CONFIG.get("listen.metrics", ["[::]:9300"])
if isinstance(listen, str):
listen = listen.split(",")
addr, _, port = listen[0].rpartition(":")
def before_worker_boot(self, broker: Broker, worker: Any) -> None:
from prometheus_client import values
from prometheus_client.values import MultiProcessValue
try:
port = int(port)
except ValueError:
LOGGER.error(f"Invalid port entered: {port}")
self.thread = HTTPServerThread(target=MetricsMiddleware.run, args=(addr, port))
self.thread.start()
values.ValueClass = MultiProcessValue(lambda: worker.worker_id)
def before_worker_shutdown(self, broker: Broker, worker: Worker):
server = self.thread.server
if server:
server.shutdown()
LOGGER.debug("Stopping MetricsMiddleware")
self.thread.join()
return super().before_worker_boot(broker, worker)
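MetricsMiddleware no longer runs its own HTTP server; its remaining job is keying prometheus_client's multiprocess value files by a stable worker id instead of the PID. A sketch of that mechanism (directory and id are illustrative):

import os
from prometheus_client import values
from prometheus_client.values import MultiProcessValue

os.environ.setdefault("PROMETHEUS_MULTIPROC_DIR", "/tmp/authentik_prometheus_tmp")
WORKER_ID = 1000  # hypothetical stable id handed down by the supervisor

# Sample files are named after this id, so a restarted worker reuses its
# predecessor's files instead of leaking one set of files per PID
values.ValueClass = MultiProcessValue(lambda: WORKER_ID)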


@@ -1,4 +1,6 @@
from django.utils.timezone import now, timedelta
from datetime import timedelta
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from dramatiq import actor


@@ -10,7 +10,6 @@ from dramatiq.results.middleware import Results
from dramatiq.worker import Worker, _ConsumerThread, _WorkerThread
from authentik.tasks.broker import PostgresBroker
from authentik.tasks.middleware import WorkerHealthcheckMiddleware
TESTING_QUEUE = "testing"
@@ -18,6 +17,7 @@ TESTING_QUEUE = "testing"
class TestWorker(Worker):
def __init__(self, broker: Broker):
super().__init__(broker=broker)
self.worker_id = 1000
self.work_queue = PriorityQueue()
self.consumers = {
TESTING_QUEUE: _ConsumerThread(
@@ -82,8 +82,6 @@ def use_test_broker():
middleware: Middleware = import_string(middleware_class)(
**middleware_kwargs,
)
if isinstance(middleware, WorkerHealthcheckMiddleware):
middleware.port = 9102
if isinstance(middleware, Retries):
middleware.max_retries = 0
if isinstance(middleware, Results):


@@ -44,8 +44,6 @@ class FlagsJSONExtension(OpenApiSerializerFieldExtension):
for flag in Flag.available():
_flag = flag()
props[_flag.key] = build_basic_type(get_args(_flag.__orig_bases__[0])[0])
if _flag.description:
props[_flag.key]["description"] = _flag.description
return build_object_type(props, required=props.keys())


@@ -14,7 +14,6 @@ if TYPE_CHECKING:
class Flag[T]:
default: T | None = None
visibility: Literal["none"] | Literal["public"] | Literal["authenticated"] = "none"
description: str | None = None
def __init_subclass__(cls, key: str, **kwargs):
cls.__key = key

go.mod

@@ -19,7 +19,7 @@ require (
github.com/gorilla/sessions v1.4.0
github.com/gorilla/websocket v1.5.3
github.com/grafana/pyroscope-go v1.2.7
github.com/jackc/pgx/v5 v5.9.1
github.com/jackc/pgx/v5 v5.8.0
github.com/jellydator/ttlcache/v3 v3.4.0
github.com/mitchellh/mapstructure v1.5.0
github.com/nmcclain/asn1-ber v0.0.0-20170104154839-2661553a0484
@@ -30,7 +30,7 @@ require (
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
github.com/wwt/guac v1.3.2
goauthentik.io/api/v3 v3.2026020.17-0.20260323171523-ab05463a3eba
goauthentik.io/api/v3 v3.2026020.17-0.20260317190750-6ec0d12b221b
golang.org/x/exp v0.0.0-20230210204819-062eb4c674ab
golang.org/x/oauth2 v0.36.0
golang.org/x/sync v0.20.0

go.sum

@@ -117,8 +117,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc=
github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
@@ -215,8 +215,6 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
goauthentik.io/api/v3 v3.2026020.17-0.20260317190750-6ec0d12b221b h1:p+iDEXjvC15pC1VscaR59Vud9/c/xeNeTFmlv4arkNI=
goauthentik.io/api/v3 v3.2026020.17-0.20260317190750-6ec0d12b221b/go.mod h1:uYa+yGMglhJy8ymyUQ8KQiJjOb3UZTuPQ24Ot2s9BCo=
goauthentik.io/api/v3 v3.2026020.17-0.20260323171523-ab05463a3eba h1:qwBygmfe8YE7m2pObvrUFC17tdaRIe84w1qjHGvBJ4w=
goauthentik.io/api/v3 v3.2026020.17-0.20260323171523-ab05463a3eba/go.mod h1:uYa+yGMglhJy8ymyUQ8KQiJjOb3UZTuPQ24Ot2s9BCo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=


@@ -31,7 +31,6 @@ const ConfigLogLevel = "log_level"
// APIController main controller which connects to the authentik api via http and ws
type APIController struct {
akURL url.URL
Client *api.APIClient
Outpost api.Outpost
GlobalConfig *api.Config
@@ -135,7 +134,6 @@ func NewAPIController(akURL url.URL, token string) *APIController {
// doGlobalSetup(outpost, akConfig)
ac := &APIController{
akURL: originalAkURL,
Client: apiClient,
GlobalConfig: akConfig,
@@ -150,7 +148,7 @@ func NewAPIController(akURL url.URL, token string) *APIController {
}
ac.logger.WithField("embedded", ac.IsEmbedded()).Info("Outpost mode")
ac.logger.WithField("offset", ac.reloadOffset.String()).Debug("HA Reload offset")
err = ac.initEvent(outpost.Pk, 0)
err = ac.initEvent(originalAkURL, outpost.Pk)
if err != nil {
go ac.recentEvents()
}


@@ -32,11 +32,9 @@ func (ac *APIController) getWebsocketURL(akURL url.URL, outpostUUID string, quer
return wsUrl
}
func (ac *APIController) initEvent(outpostUUID string, attempt int) error {
akURL := ac.akURL
func (ac *APIController) initEvent(akURL url.URL, outpostUUID string) error {
query := akURL.Query()
query.Set("instance_uuid", ac.instanceUUID.String())
query.Set("attempt", strconv.Itoa(attempt))
authHeader := fmt.Sprintf("Bearer %s", ac.token)
@@ -108,10 +106,18 @@ func (ac *APIController) recentEvents() {
return
}
ac.wsIsReconnecting = true
u := url.URL{
Host: ac.Client.GetConfig().Host,
Scheme: ac.Client.GetConfig().Scheme,
Path: strings.ReplaceAll(ac.Client.GetConfig().Servers[0].URL, "api/v3", ""),
}
attempt := 1
_ = retry.Do(
func() error {
err := ac.initEvent(ac.Outpost.Pk, attempt)
q := u.Query()
q.Set("attempt", strconv.Itoa(attempt))
u.RawQuery = q.Encode()
err := ac.initEvent(u, ac.Outpost.Pk)
attempt += 1
if err != nil {
return err


@@ -52,7 +52,6 @@ func RunMetricsServer(listen string, router *mux.Router) {
func RunMetricsUnix(router *mux.Router) {
socketPath := path.Join(os.TempDir(), MetricsSocketName)
l := log.WithField("logger", "authentik.outpost.metrics").WithField("listen", socketPath)
_ = os.Remove(socketPath)
ln, err := unix.Listen(socketPath)
if err != nil {
l.WithError(err).Warning("failed to listen")
@@ -60,7 +59,6 @@ func RunMetricsUnix(router *mux.Router) {
}
defer func() {
err := ln.Close()
_ = os.Remove(socketPath)
if err != nil {
l.WithError(err).Warning("failed to close listener")
}


@@ -37,6 +37,7 @@ func (ws *WebServer) runMetricsServer(listen string) {
l.WithError(err).Warning("failed to get upstream metrics")
return
}
re.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ws.metricsKey))
res, err := ws.upstreamHttpClient().Do(re)
if err != nil {
l.WithError(err).Warning("failed to get upstream metrics")


@@ -31,6 +31,7 @@ import (
const (
SocketName = "authentik.sock"
IPCKeyFile = "authentik-core-ipc.key"
MetricsKeyFile = "authentik-core-metrics.key"
CoreSocketName = "authentik-core.sock"
)
@@ -51,7 +52,8 @@ type WebServer struct {
upstreamClient *http.Client
upstreamURL *url.URL
ipcKey string
metricsKey string
ipcKey string
}
func NewWebServer() *WebServer {
@@ -90,7 +92,6 @@ func NewWebServer() *WebServer {
upstreamClient: upstreamClient,
upstreamURL: u,
}
ws.mainRouter.PathPrefix(config.Get().Web.Path).Path("/-/metrics/").Handler(http.NotFoundHandler())
ws.configureStatic()
ws.configureProxy()
// Redirect for sub-folder
@@ -121,7 +122,15 @@ func (ws *WebServer) upstreamHealthcheck() bool {
func (ws *WebServer) prepareKeys() {
tmp := os.TempDir()
key := base64.StdEncoding.EncodeToString(securecookie.GenerateRandomKey(64))
err := os.WriteFile(path.Join(tmp, IPCKeyFile), []byte(key), 0o600)
err := os.WriteFile(path.Join(tmp, MetricsKeyFile), []byte(key), 0o600)
if err != nil {
ws.log.WithError(err).Warning("failed to save metrics key")
return
}
ws.metricsKey = key
key = base64.StdEncoding.EncodeToString(securecookie.GenerateRandomKey(64))
err = os.WriteFile(path.Join(tmp, IPCKeyFile), []byte(key), 0o600)
if err != nil {
ws.log.WithError(err).Warning("failed to save ipc key")
return
@@ -219,7 +228,11 @@ func (ws *WebServer) Shutdown() {
ws.log.Info("shutting down gunicorn")
ws.g.Kill()
tmp := os.TempDir()
err := os.Remove(path.Join(tmp, IPCKeyFile))
err := os.Remove(path.Join(tmp, MetricsKeyFile))
if err != nil {
ws.log.WithError(err).Warning("failed to remove metrics key file")
}
err = os.Remove(path.Join(tmp, IPCKeyFile))
if err != nil {
ws.log.WithError(err).Warning("failed to remove ipc key file")
}
@@ -234,7 +247,6 @@ func (ws *WebServer) listenUnix(listen string) {
}
defer func() {
err := ln.Close()
_ = os.Remove(listen)
if err != nil {
ws.log.WithField("listen", listen).WithError(err).Warning("failed to close listener")
}


@@ -1,10 +1,6 @@
#!/usr/bin/env -S bash
set -e -o pipefail
MODE_FILE="${TMPDIR}/authentik-mode"
#!/usr/bin/env bash
if [[ -z "${PROMETHEUS_MULTIPROC_DIR}" ]]; then
export PROMETHEUS_MULTIPROC_DIR="${TMPDIR:-/tmp}/authentik_prometheus_tmp"
fi
set -e -o pipefail
function log {
printf '{"event": "%s", "level": "info", "logger": "bootstrap"}\n' "$@" >&2
@@ -15,10 +11,18 @@ function wait_for_db {
log "Bootstrap completed"
}
function check_if_root {
function run_authentik {
if [[ -x "$(command -v authentik)" ]]; then
echo authentik "$@"
else
echo cargo run -- "$@"
fi
}
function check_if_root_and_run {
if [[ $EUID -ne 0 ]]; then
log "Not running as root, disabling permission fixes"
exec $1
exec $(run_authentik "$@")
return
fi
SOCKET="/var/run/docker.sock"
@@ -26,36 +30,19 @@ function check_if_root {
if [[ -e "$SOCKET" ]]; then
# Get group ID of the docker socket, so we can create a matching group and
# add ourselves to it
DOCKER_GID=$(stat -c '%g' $SOCKET)
DOCKER_GID="$(stat -c "%g" "${SOCKET}")"
# Ensure group for the id exists
getent group $DOCKER_GID || groupadd -f -g $DOCKER_GID docker
usermod -a -G $DOCKER_GID authentik
getent group "${DOCKER_GID}" || groupadd -f -g "${DOCKER_GID}" docker
usermod -a -G "${DOCKER_GID}" authentik
# since the name of the group might not be docker, we need to look up the group id
GROUP_NAME=$(getent group $DOCKER_GID | sed 's/:/\n/g' | head -1)
GROUP_NAME=$(getent group "${DOCKER_GID}" | sed 's/:/\n/g' | head -1)
GROUP="authentik:${GROUP_NAME}"
fi
# Fix permissions of certs and media
chown -R authentik:authentik /data /certs "${PROMETHEUS_MULTIPROC_DIR}"
chmod ug+rwx /data
chmod ug+rx /certs
exec chpst -u authentik:$GROUP env HOME=/authentik $1
}
function run_authentik {
if [[ -x "$(command -v authentik)" ]]; then
exec authentik $@
else
exec go run -v ./cmd/server/ $@
fi
}
function set_mode {
echo $1 >$MODE_FILE
trap cleanup EXIT
}
function cleanup {
rm -f ${MODE_FILE}
exec chpst -u authentik:"${GROUP}" env HOME=/authentik $(run_authentik "$@")
}
function prepare_debug {
@@ -72,38 +59,31 @@ function prepare_debug {
chown authentik:authentik /unittest.xml
}
if [[ -z "${PROMETHEUS_MULTIPROC_DIR}" ]]; then
export PROMETHEUS_MULTIPROC_DIR="${TMPDIR:-/tmp}/authentik_prometheus_tmp"
fi
mkdir -p "${PROMETHEUS_MULTIPROC_DIR}"
if [[ "$(python -m authentik.lib.config debugger 2>/dev/null)" == "True" ]]; then
prepare_debug
fi
if [[ "$1" == "server" ]]; then
set_mode "server"
run_authentik
elif [[ "$1" == "worker" ]]; then
set_mode "worker"
shift
# If we have bootstrap credentials set, run bootstrap tasks outside of main server
# sync, so that we can be sure the first start actually has working bootstrap
# credentials
if [[ -n "${AUTHENTIK_BOOTSTRAP_PASSWORD}" || -n "${AUTHENTIK_BOOTSTRAP_TOKEN}" ]]; then
python -m manage apply_blueprint system/bootstrap.yaml || true
fi
check_if_root "python -m manage worker --pid-file ${TMPDIR}/authentik-worker.pid $@"
elif [[ "$1" == "bash" ]]; then
/bin/bash
elif [[ "$1" == "test-all" ]]; then
prepare_debug
chmod 777 /root
check_if_root "python -m manage test authentik"
elif [[ "$1" == "healthcheck" ]]; then
run_authentik healthcheck $(cat $MODE_FILE)
if [[ "$1" == "bash" ]]; then
exec /usr/bin/env -S bash "$@"
elif [[ "$1" == "dump_config" ]]; then
shift
exec python -m authentik.lib.config $@
shift 1
exec python -m authentik.lib.config "$@"
elif [[ "$1" == "debug" ]]; then
exec sleep infinity
elif [[ "$1" == "test-all" ]]; then
wait_for_db
prepare_debug
chmod 777 /root
check_if_root_and_run manage test authentik
elif [[ "$1" == "allinone" ]] || [[ "$1" == "server" ]] || [[ "$1" == "worker" ]] || [[ "$1" == "proxy" ]] || [[ "$1" == "manage" ]]; then
wait_for_db
check_if_root_and_run "$@"
else
wait_for_db
exec python -m manage "$@"
fi


@@ -78,9 +78,9 @@ RUN --mount=type=secret,id=GEOIPUPDATE_ACCOUNT_ID \
/bin/sh -c "GEOIPUPDATE_LICENSE_KEY_FILE=/run/secrets/GEOIPUPDATE_LICENSE_KEY /usr/bin/entry.sh || echo 'Failed to get GeoIP database, disabling'; exit 0"
# Stage 4: Download uv
FROM ghcr.io/astral-sh/uv:0.10.12@sha256:72ab0aeb448090480ccabb99fb5f52b0dc3c71923bffb5e2e26517a1c27b7fec AS uv
FROM ghcr.io/astral-sh/uv:0.10.11@sha256:3472e43b4e738cf911c99d41bb34331280efad54c73b1def654a6227bb59b2b4 AS uv
# Stage 5: Base python image
FROM ghcr.io/goauthentik/fips-python:3.14.3-slim-trixie-fips@sha256:bf45eb77a010d76fe6abd7ae137d1b0c44b6227cd984945042135fdf05ebf8d9 AS python-base
FROM ghcr.io/goauthentik/fips-python:3.14.3-slim-trixie-fips@sha256:859ad5743f2f6f348bf139d760722ccdefa00fde3438b1b594e3033d3e6200ad AS python-base
ENV VENV_PATH="/ak-root/.venv" \
PATH="/lifecycle:/ak-root/.venv/bin:$PATH" \


@@ -31,7 +31,7 @@ RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \
go build -o /go/ldap ./cmd/ldap
# Stage 2: Run
FROM ghcr.io/goauthentik/fips-debian:trixie-slim-fips@sha256:7726387c78b5787d2146868c2ccc8948a3591d0a5a6436f7780c8c28acc76341
FROM ghcr.io/goauthentik/fips-debian:trixie-slim-fips@sha256:7baeeaa59f82826af960a936af473b54f51090e9165d0c8482142818add51f47
ARG VERSION
ARG GIT_BUILD_HASH


@@ -47,7 +47,7 @@ RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \
go build -o /go/proxy ./cmd/proxy
# Stage 3: Run
FROM ghcr.io/goauthentik/fips-debian:trixie-slim-fips@sha256:7726387c78b5787d2146868c2ccc8948a3591d0a5a6436f7780c8c28acc76341
FROM ghcr.io/goauthentik/fips-debian:trixie-slim-fips@sha256:7baeeaa59f82826af960a936af473b54f51090e9165d0c8482142818add51f47
ARG VERSION
ARG GIT_BUILD_HASH


@@ -31,7 +31,7 @@ RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \
go build -o /go/radius ./cmd/radius
# Stage 2: Run
FROM ghcr.io/goauthentik/fips-debian:trixie-slim-fips@sha256:7726387c78b5787d2146868c2ccc8948a3591d0a5a6436f7780c8c28acc76341
FROM ghcr.io/goauthentik/fips-debian:trixie-slim-fips@sha256:7baeeaa59f82826af960a936af473b54f51090e9165d0c8482142818add51f47
ARG VERSION
ARG GIT_BUILD_HASH


@@ -1,6 +1,8 @@
"""Gunicorn config"""
import os
import platform
import signal
from hashlib import sha512
from pathlib import Path
from tempfile import gettempdir
@@ -17,7 +19,6 @@ from authentik.lib.utils.reflection import get_env
from authentik.root.install_id import get_install_id_raw
from authentik.root.setup import setup
from lifecycle.migrate import run_migrations
from lifecycle.wait_for_db import wait_for_db
from lifecycle.worker import DjangoUvicornWorker
if TYPE_CHECKING:
@@ -28,16 +29,12 @@ if TYPE_CHECKING:
setup()
wait_for_db()
_tmp = Path(gettempdir())
worker_class = "lifecycle.worker.DjangoUvicornWorker"
worker_tmp_dir = str(_tmp.joinpath("authentik_gunicorn_tmp"))
os.makedirs(worker_tmp_dir, exist_ok=True)
bind = f"unix://{str(_tmp.joinpath('authentik-core.sock'))}"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
preload_app = True
@@ -45,11 +42,28 @@ preload_app = True
max_requests = CONFIG.get_int("web.max_requests", 1000)
max_requests_jitter = CONFIG.get_int("web.max_requests_jitter", 50)
# Match the value in src/arbiter.rs for graceful shutdown
dirty_graceful_timeout = 30
logconfig_dict = get_logger_config()
workers = CONFIG.get_int("web.workers", 2)
threads = CONFIG.get_int("web.threads", 4)
# libpq can try Kerberos/GSS on macOS, which is not fork-safe in our Gunicorn worker model.
# Disable GSS negotiation for local/dev PostgreSQL connections on Darwin.
if platform.system() == "Darwin":
os.environ.setdefault("PGGSSENCMODE", "disable")
# Avoid macOS SystemConfiguration proxy lookups (_scproxy) in forked workers.
# urllib/requests may consult these APIs and can crash in child workers.
os.environ.setdefault("NO_PROXY", "*")
os.environ.setdefault("no_proxy", "*")
def when_ready(server: "Arbiter"): # noqa: UP037
# Notify rust process that we are ready
os.kill(os.getppid(), signal.SIGUSR1)
def post_fork(server: "Arbiter", worker: DjangoUvicornWorker): # noqa: UP037
"""Tell prometheus to use worker number instead of process ID for multiprocess"""

lifecycle/worker_process.py (new file)

@@ -0,0 +1,148 @@
#!/usr/bin/env python3
import os
import random
import signal
import sys
from http.server import BaseHTTPRequestHandler, HTTPServer
from socket import AF_UNIX
from threading import Event, Thread
from typing import Any
from dramatiq import Worker, get_broker
from structlog.stdlib import get_logger
from authentik.lib.config import CONFIG
LOGGER = get_logger()
INITIAL_WORKER_ID = 1000
class HttpHandler(BaseHTTPRequestHandler):
def check_db(self):
from django.db import connections
for db_conn in connections.all():
# Force connection reload
db_conn.connect()
_ = db_conn.cursor()
def do_GET(self):
if self.path == "/-/metrics/":
from authentik.root.monitoring import monitoring_set
monitoring_set.send_robust(self)
self.send_response(200)
self.end_headers()
elif self.path == "/-/health/ready/":
from django.db.utils import OperationalError
try:
self.check_db()
# Only report 200 when the DB check passed; don't fall through after a 503
self.send_response(200)
except OperationalError:
self.send_response(503)
self.end_headers()
else:
self.send_response(200)
self.end_headers()
def log_message(self, format: str, *args: Any) -> None:
pass
class UnixSocketServer(HTTPServer):
address_family = AF_UNIX
def main(worker_id: int, socket_path: str | None):
shutdown = Event()
srv = None
def immediate_shutdown(signum, frame):
nonlocal srv
if srv is not None:
srv.shutdown()
if socket_path:
os.remove(socket_path)
sys.exit(0)
def graceful_shutdown(signum, frame):
nonlocal shutdown
shutdown.set()
signal.signal(signal.SIGHUP, immediate_shutdown)
signal.signal(signal.SIGINT, immediate_shutdown)
signal.signal(signal.SIGQUIT, immediate_shutdown)
signal.signal(signal.SIGTERM, graceful_shutdown)
random.seed()
logger = LOGGER.bind(worker_id=worker_id)
logger.debug("Loading broker...")
broker = get_broker()
broker.emit_after("process_boot")
logger.debug("Starting worker threads...")
queues = None # all queues
worker = Worker(broker, queues=queues, worker_threads=CONFIG.get_int("worker.threads"))
worker.worker_id = worker_id
worker.start()
logger.info("Worker process is ready for action.")
if socket_path:
srv = UnixSocketServer(socket_path, HttpHandler)
Thread(target=srv.serve_forever).start()
# Notify rust process that we are ready
os.kill(os.getppid(), signal.SIGUSR2)
shutdown.wait()
logger.info("Shutting down worker...")
if srv is not None:
srv.shutdown()
if socket_path:
os.remove(socket_path)
# 5 secs if debug, 10 mins otherwise
worker.stop(timeout=5_000 if CONFIG.get_bool("debug") else 600_000)
broker.close()
logger.info("Worker shut down.")
if __name__ == "__main__":
if len(sys.argv) not in [2, 3]:
print("USAGE: worker_process <worker_id> [SOCKET_PATH]")
sys.exit(1)
worker_id = int(sys.argv[1])
socket_path = sys.argv[2] if len(sys.argv) == 3 else None # noqa: PLR2004
from authentik.root.setup import setup
setup()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
import django
django.setup()
from django.core.management import execute_from_command_line
if socket_path:
from lifecycle.migrate import run_migrations
run_migrations()
if (
"AUTHENTIK_BOOTSTRAP_PASSWORD" in os.environ
or "AUTHENTIK_BOOTSTRAP_TOKEN" in os.environ
):
try:
execute_from_command_line(["", "apply_blueprint", "system/bootstrap.yaml"])
except Exception as exc: # noqa: BLE001
sys.stderr.write(f"Failed to apply bootstrap blueprint: {exc}")
main(worker_id, socket_path)
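Each worker process above serves its health and metrics endpoints over an AF_UNIX socket. A client-side probe sketch (the socket path is illustrative, not a documented location):

import http.client
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    """HTTPConnection that dials a Unix domain socket instead of TCP."""
    def __init__(self, socket_path: str):
        super().__init__("localhost")
        self.socket_path = socket_path

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.socket_path)

conn = UnixHTTPConnection("/tmp/authentik-worker-1000.sock")  # assumed path
conn.request("GET", "/-/health/ready/")
print(conn.getresponse().status)  # 200 once the DB check passes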


@@ -6,17 +6,15 @@
# Translators:
# Charles Leclerc, 2025
# Marc Schmitt, 2025
# Esteban, 2026
# Sp P, 2026
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-03-16 00:18+0000\n"
"POT-Creation-Date: 2026-02-10 19:27+0000\n"
"PO-Revision-Date: 2025-12-01 19:09+0000\n"
"Last-Translator: Sp P, 2026\n"
"Last-Translator: Marc Schmitt, 2025\n"
"Language-Team: French (France) (https://app.transifex.com/authentik/teams/119923/fr_FR/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -41,10 +39,6 @@ msgid ""
"hyphens (-), underscores (_), forward slashes (/), and the placeholder "
"%(theme)s for theme-specific files"
msgstr ""
"Le nom du fichier ne peut contenir que des lettres (a-z, A-Z), nombres "
"(0-9), points (.), tirets (-), tirets bas (_), barres obliques \"slash\" "
"(/), and l'élément de substitution %(theme)s pour les fichiers spécifiques "
"aux thèmes"
#: authentik/admin/files/validation.py
msgid "File name cannot contain duplicate /"
@@ -178,7 +172,7 @@ msgid ""
"Domain that activates this brand. Can be a superset, i.e. `a.b` for `aa.b` "
"and `ba.b`"
msgstr ""
"Domaine qui active cette marque. Peut être un super-ensemble, c'est-à-dire "
"Domain qui active cette marque. Peut être un super-ensemble, c'est-à-dire "
"`a.b` pour `aa.b` et `ba.b`"
#: authentik/brands/models.py
@@ -274,33 +268,30 @@ msgstr "Les segments vides dans le chemin utilisateur ne sont pas autorisés."
#: authentik/core/api/users.py
msgid "Can't change internal service account to other user type."
msgstr ""
"Impossible de changer le compte de service interne vers un autre type "
"d'utilisateur."
#: authentik/core/api/users.py
msgid "Setting a user to internal service account is not allowed."
msgstr ""
"Définir un utilisateur comme compte de service interne n'est pas autorisé."
#: authentik/core/api/users.py
msgid "Can't modify internal service account users"
msgstr "Impossible de modifier les utilisateurs du compte de service interne"
msgstr ""
#: authentik/core/api/users.py
msgid "filter: failed to parse JSON"
msgstr "filtre : échec de l'analyse du JSON"
msgstr ""
#: authentik/core/api/users.py
msgid "filter: value must be key:value mapping"
msgstr "filtre : la valeur doit être un mapping clé:valeur"
msgstr ""
#: authentik/core/api/users.py
msgid "No recovery flow set."
msgstr "Aucun flux de récupération défini."
msgstr ""
#: authentik/core/api/users.py
msgid "Recovery flow not applicable to user"
msgstr "Flux de récupération non applicable pour l'utilisateur"
msgstr ""
#: authentik/core/api/users.py
msgid "A user/group with these details already exists"
@@ -316,15 +307,15 @@ msgstr "Une erreur inconnue est parvenue"
#: authentik/core/api/users.py
msgid "User does not have an email address set."
msgstr "L'utilisateur n'a pas d'adresse de courriel définie."
msgstr ""
#: authentik/core/api/users.py
msgid "Email stage not found."
msgstr "Étape de courriel non trouvé."
msgstr ""
#: authentik/core/api/users.py
msgid "This field is required."
msgstr "Ce champ est obligatoire."
msgstr "Ce champ est requis."
#: authentik/core/models.py
msgid "name"
@@ -336,7 +327,7 @@ msgstr "Les utilisateurs ajoutés à ce groupe seront des super-utilisateurs."
#: authentik/core/models.py
msgid "Group"
msgstr "Groupe"
msgstr "Group"
#: authentik/core/models.py
msgid "Groups"
@@ -352,11 +343,11 @@ msgstr "Retirer l'utilisateur du groupe"
#: authentik/core/models.py
msgid "Enable superuser status"
msgstr "Activer l'état super-utilisateur"
msgstr "Activer le statut super-utilisateur"
#: authentik/core/models.py
msgid "Disable superuser status"
msgstr "Désactiver l'état super-utilisateur"
msgstr "Désactiver le statut super-utilisateur"
#: authentik/core/models.py
msgid "Group Parentage Node"
@@ -395,7 +386,7 @@ msgstr ""
#: authentik/core/models.py
msgid "View applications the user has access to"
msgstr "Voir les applications auxquelles l'utilisateur a accès"
msgstr "Voir les applications auquel l'utilisateur a accès"
#: authentik/core/models.py
msgid ""
@@ -411,7 +402,7 @@ msgstr "Flux utilisé lors de l'autorisation de ce fournisseur."
#: authentik/core/models.py
msgid "Flow used ending the session from a provider."
msgstr "Flux utilisé lorsque la session est terminée par un fournisseur."
msgstr "Flux utilisé lorsque la session est terminée depuis un fournisseur."
#: authentik/core/models.py
msgid ""
@@ -459,16 +450,15 @@ msgid ""
"Link to a user with identical email address. Can have security implications "
"when a source doesn't validate email addresses."
msgstr ""
"Lier à un utilisateur avec une adresse de courriel identique. Peut avoir des"
" implications de sécurité lorsqu'une source ne valide pas les adresses de "
"courriel."
"Lier à un utilisateur avec une adresse email identique. Peut avoir des "
"implications de sécurité lorsqu'une source ne valide pas les adresses email."
#: authentik/core/models.py
msgid ""
"Use the user's email address, but deny enrollment when the email address "
"already exists."
msgstr ""
"Utiliser l'adresse de courriel de l'utilisateur, mais refuser l'inscription "
"Utiliser l'adresse courriel de l'utilisateur, mais refuser l'inscription "
"lorsque celle-ci existe déjà."
#: authentik/core/models.py
@@ -626,14 +616,6 @@ msgstr "Supprime les utilisateurs temporaires créés par les sources SAML."
msgid "Go home"
msgstr "Retourner à l'accueil"
#: authentik/core/templates/login/base_full.html
msgid "Site footer"
msgstr "Pied de page du site"
#: authentik/core/templates/login/base_full.html
msgid "Flow links"
msgstr "Liens de flux"
#: authentik/core/templates/login/base_full.html
#: authentik/flows/templates/if/flow-sfe.html
msgid "Powered by authentik"
@@ -678,7 +660,7 @@ msgstr "RSA"
#: authentik/crypto/models.py
msgid "Elliptic Curve"
msgstr "Courbe elliptique"
msgstr "Elliptic Curve"
#: authentik/crypto/models.py
msgid "DSA"
@@ -693,33 +675,32 @@ msgid ""
"Optional Private Key. If this is set, you can use this keypair for "
"encryption."
msgstr ""
"Clé privée optionnelle. Si définie, vous pouvez l'utiliser pour le "
"Clé privée optionnelle. Si définie, vous pouvez utiliser pour le "
"chiffrement."
#: authentik/crypto/models.py
msgid "Key algorithm type detected from the certificate's public key"
msgstr ""
"Type d'algorithme de la clé, détecté depuis la clé publique du certificat"
#: authentik/crypto/models.py
msgid "Certificate expiry date"
msgstr "Date d'expiration du certificat"
msgstr ""
#: authentik/crypto/models.py
msgid "Certificate subject as RFC4514 string"
msgstr "Sujet du certificat, en tant que chaîne de caractères RFC4514"
msgstr ""
#: authentik/crypto/models.py
msgid "SHA256 fingerprint of the certificate"
msgstr "Empreinte SHA256 du certificat"
msgstr ""
#: authentik/crypto/models.py
msgid "SHA1 fingerprint of the certificate"
msgstr "Empreinte SHA1 du certificat"
msgstr ""
#: authentik/crypto/models.py
msgid "Key ID generated from private key"
msgstr "ID de clé généré par la clé privée"
msgstr ""
#: authentik/crypto/models.py
msgid "Certificate-Key Pair"
@@ -743,10 +724,6 @@ msgstr ""
"Découvre, importe et met à jour les certificats depuis le système de "
"fichiers."
#: authentik/endpoints/api/stages.py
msgid "Selected connector is not compatible with this stage."
msgstr "Le connecteur sélectionné n'est pas compatible avec cette étape."
#: authentik/endpoints/connectors/agent/api/connectors.py
msgid "Selected platform not supported"
msgstr "La plateforme sélectionnée n'est pas supportée"
@@ -803,15 +780,13 @@ msgstr "Nonces Apple"
#: authentik/endpoints/facts.py
msgid "Operating System name, such as 'Server 2022' or 'Ubuntu'"
msgstr "Nom du système d'exploitation, comme 'Server 2022' ou 'Ubuntu'"
msgstr ""
#: authentik/endpoints/facts.py
msgid ""
"Operating System version, must always be the version number but may contain "
"build name"
msgstr ""
"Version du système d'exploitation, qui doit toujours contenir le numéro de "
"version mais peut aussi contenir le nom de la compilation"
#: authentik/endpoints/models.py
msgid "Device"
@@ -871,7 +846,7 @@ msgstr "Entreprise est requis pour créer/mettre à jour cet objet."
#: authentik/enterprise/api.py
msgid "Enterprise is required to use this endpoint."
msgstr "Entreprise est requis pour utiliser ce point de terminaison."
msgstr ""
#: authentik/enterprise/endpoints/connectors/fleet/models.py
#: authentik/events/models.py
@@ -884,107 +859,83 @@ msgstr ""
#: authentik/enterprise/endpoints/connectors/fleet/models.py
msgid "Fleet Connector"
msgstr "Connecteurs Fleet"
msgstr ""
#: authentik/enterprise/endpoints/connectors/fleet/models.py
msgid "Fleet Connectors"
msgstr "Connecteur Fleet"
#: authentik/enterprise/endpoints/connectors/google_chrome/models.py
msgid "Google Device Trust Connector"
msgstr "Connecteur Google Device Trust"
#: authentik/enterprise/endpoints/connectors/google_chrome/models.py
msgid "Google Device Trust Connectors"
msgstr "Connecteurs Google Device Trust"
#: authentik/enterprise/endpoints/connectors/google_chrome/stage.py
#: authentik/enterprise/stages/authenticator_endpoint_gdtc/stage.py
msgid "Verifying your browser..."
msgstr "Vérification de votre navigateur..."
msgstr ""
#: authentik/enterprise/lifecycle/api/reviews.py
msgid "You are not allowed to submit a review for this object."
msgstr "Vous n'êtes pas autorisé à soumettre une révision pour cet objet."
msgstr ""
#: authentik/enterprise/lifecycle/api/rules.py
msgid "Object does not exist"
msgstr "L'objet n'existe pas"
msgstr ""
#: authentik/enterprise/lifecycle/api/rules.py
msgid "Either a reviewer group or a reviewer must be set."
msgstr "Soit un groupe de réviseurs soit un réviseur doit être défini."
msgstr ""
#: authentik/enterprise/lifecycle/api/rules.py
msgid "Grace period must be shorter than the interval."
msgstr "La période de grâce doit être plus courte que l'intervalle."
msgstr ""
#: authentik/enterprise/lifecycle/api/rules.py
msgid "Only one type-wide rule for each object type is allowed."
msgstr ""
"Une seule règle pour l'ensemble du type est autorisée pour chaque type "
"d'objet."
#: authentik/enterprise/lifecycle/models.py
msgid ""
"Select which transports should be used to notify the reviewers. If none are "
"selected, the notification will only be shown in the authentik UI."
msgstr ""
"Sélectionnez quels moyens de transports doivent être utilisés pour notifier "
"les réviseurs. Si aucun n'est sélectionné, la notification sera uniquement "
"affichée dans l'interface d'authentik."
#: authentik/enterprise/lifecycle/models.py
msgid "Reviewed"
msgstr "Révisé"
msgstr ""
#: authentik/enterprise/lifecycle/models.py
msgid "Pending"
msgstr "En attente"
msgstr ""
#: authentik/enterprise/lifecycle/models.py
msgid "Overdue"
msgstr "En retard"
msgstr ""
#: authentik/enterprise/lifecycle/models.py
msgid "Canceled"
msgstr "Annulé"
msgstr ""
#: authentik/enterprise/lifecycle/models.py
msgid "Go to {self._get_model_name()}"
msgstr "Aller à {self._get_model_name()}"
msgstr ""
#: authentik/enterprise/lifecycle/models.py
msgid "Access review is due for {self.content_type.name} {str(self.object)}"
msgstr ""
"La révision d'accès est attendue pour {self.content_type.name} "
"{str(self.object)}"
#: authentik/enterprise/lifecycle/models.py
msgid ""
"Access review is overdue for {self.content_type.name} {str(self.object)}"
msgstr ""
"La révision d'accès est en retard pour {self.content_type.name} "
"{str(self.object)}"
#: authentik/enterprise/lifecycle/models.py
msgid ""
"Access review completed for {self.content_type.name} {str(self.object)}"
msgstr ""
"La révision d'accès est terminée pour {self.content_type.name} "
"{str(self.object)}"
#: authentik/enterprise/lifecycle/tasks.py
msgid "Dispatch tasks to validate lifecycle rules."
msgstr "Déclenche les tâches pour valider les règles de cycle de vie"
msgstr ""
#: authentik/enterprise/lifecycle/tasks.py
msgid "Apply lifecycle rule."
msgstr "Appliquer la règle de cycle de vie."
msgstr ""
#: authentik/enterprise/lifecycle/tasks.py
msgid "Send lifecycle rule notification."
msgstr "Envoyer la notification de la règle de cycle de vie."
msgstr ""
#: authentik/enterprise/models.py
msgid "License"
@@ -1113,14 +1064,10 @@ msgid ""
"Dispatch deletions for an object (user, group) for Google Workspace "
"providers."
msgstr ""
"Déclenche les suppressions pour un objet (utilisateur, groupe) pour les "
"fournisseurs de Google Workspace."
#: authentik/enterprise/providers/google_workspace/tasks.py
msgid "Delete an object (user, group) for Google Workspace provider."
msgstr ""
"Supprime un objet (utilisateur, groupe) pour le fournisseur de Google "
"Workspace."
#: authentik/enterprise/providers/google_workspace/tasks.py
msgid ""
@@ -1197,16 +1144,12 @@ msgstr ""
#: authentik/enterprise/providers/microsoft_entra/tasks.py
msgid "Delete an object (user, group) for Microsoft Entra provider."
msgstr ""
"Supprime un objet (utilisateur, groupe) pour le fournisseur de Microsoft "
"Entra."
#: authentik/enterprise/providers/microsoft_entra/tasks.py
msgid ""
"Dispatch deletions for an object (user, group) for Microsoft Entra "
"providers."
msgstr ""
"Déclenche les suppressions pour un objet (utilisateur, groupe) pour les "
"fournisseurs de Microsoft Entra."
#: authentik/enterprise/providers/microsoft_entra/tasks.py
msgid "Sync a related object (memberships) for Microsoft Entra provider."
@@ -1277,11 +1220,11 @@ msgstr "Envoye un événement SSF."
#: authentik/enterprise/providers/ws_federation/models.py
msgid "WS-Federation Provider"
msgstr "Fournisseur de fédération de web-services (WS-Federation)"
msgstr ""
#: authentik/enterprise/providers/ws_federation/models.py
msgid "WS-Federation Providers"
msgstr "Fournisseurs de fédération de web-services (WS-Federation)"
msgstr ""
#: authentik/enterprise/providers/ws_federation/views.py
#: authentik/providers/oauth2/views/authorize.py
@@ -1331,6 +1274,10 @@ msgstr "Appareil point de terminaison"
msgid "Endpoint Devices"
msgstr "Appareils point de terminaison"
#: authentik/enterprise/stages/authenticator_endpoint_gdtc/stage.py
msgid "Verifying your browser..."
msgstr "Vérification de votre navigateur..."
#: authentik/enterprise/stages/mtls/models.py
msgid ""
"Configure certificate authorities to validate the certificate against. This "
@@ -1379,7 +1326,7 @@ msgstr "Étapes Source"
#: authentik/enterprise/tasks.py
msgid "Update enterprise license status."
msgstr "Mettre à jour l'état de licence entreprise."
msgstr "Mettre à jour le statut de licence entreprise."
#: authentik/events/models.py
msgid "Event"
@@ -1414,14 +1361,6 @@ msgstr ""
"Envoyer une seule fois la notification, par exemple lors de l'envoi d'un "
"webhook dans un canal de discussion."
#: authentik/events/models.py
msgid ""
"When set, the selected ceritifcate is used to validate the certificate of "
"the webhook server."
msgstr ""
"Quand défini, le certificat sélectionné est utilisé pour valider le "
"certificat du serveur de Webhook."
#: authentik/events/models.py
msgid ""
"Customize the body of the request. Mapping should return data that is JSON-"
@@ -1598,7 +1537,7 @@ msgstr "Flux"
#: authentik/flows/exceptions.py
msgid "Flow does not apply to current user."
msgstr "Le flux ne s'applique pas à l'utilisateur actuel."
msgstr "Le flux ne s'applique pas à l'utilisateur actuel"
#: authentik/flows/models.py
#, python-brace-format
@@ -1709,6 +1648,10 @@ msgstr "Jeton du flux"
msgid "Flow Tokens"
msgstr "Jetons du flux"
#: authentik/flows/templates/if/flow.html
msgid "Site footer"
msgstr "Pied de page du site"
#: authentik/flows/views/executor.py
msgid "Invalid next URL"
msgstr "URL suivante invalide"
@@ -2170,10 +2113,10 @@ msgid ""
"that the numbers aren't too low for POSIX users. Default is 2000 to ensure "
"that we don't collide with local users uidNumber"
msgstr ""
"La première valeur des uidNumbers, ce nombre est ajouté au user.pk pour "
"s'assurer que les nombres ne sont pas trop bas pour les utilisateurs POSIX. "
"La valeur par défaut est 2000 pour s'assurer que nous n'entrons pas en "
"collision avec les uidNumber des utilisateurs locaux"
"Le début des uidNumbers, ce nombre est ajouté au user.pk pour s'assurer que "
"les nombres ne sont pas trop bas pour les utilisateurs POSIX. La valeur par "
"défaut est 2000 pour s'assurer que nous n'entrons pas en collision avec les "
"uidNumber des utilisateurs locaux"
#: authentik/providers/ldap/models.py
msgid ""
@@ -2182,9 +2125,9 @@ msgid ""
"Default is 4000 to ensure that we don't collide with local groups or users "
"primary groups gidNumber"
msgstr ""
"La première valeur des gidNumbers, ce nombre est ajouté à un nombre généré à"
" partir du group.pk pour s'assurer que les nombres ne sont pas trop bas pour"
" les groupes POSIX. La valeur par défaut est 4000 pour s'assurer que nous "
"Le début des gidNumbers, ce nombre est ajouté à un nombre généré à partir du"
" group.pk pour s'assurer que les nombres ne sont pas trop bas pour les "
"groupes POSIX. La valeur par défaut est 4000 pour s'assurer que nous "
"n'entrons pas en collision avec les groupes locaux ou les gidNumber des "
"groupes primaires des utilisateurs"
@@ -2507,7 +2450,7 @@ msgstr "Compatibilité GitHub : accès aux informations utilisateur"
#: authentik/providers/oauth2/views/userinfo.py
msgid "GitHub Compatibility: Access you Email addresses"
msgstr "Compatibilité GitHub : accès aux adresses de courriel"
msgstr "Compatibilité GitHub : accès aux adresses email"
#: authentik/providers/oauth2/views/userinfo.py
msgid "GitHub Compatibility: Access your Groups"
@@ -2610,6 +2553,10 @@ msgstr "Fournisseur Proxy"
msgid "Proxy Providers"
msgstr "Fournisseur de Proxy"
#: authentik/providers/proxy/tasks.py
msgid "Terminate session on Proxy outpost."
msgstr "Met fin à la session sur l'avant-poste Proxy."
#: authentik/providers/rac/models.py authentik/stages/user_login/models.py
msgid ""
"Determines how long a session lasts. Default of 0 means that the sessions "
@@ -2707,8 +2654,6 @@ msgstr ""
#: authentik/providers/saml/api/providers.py
msgid "Only RSA, EC, and DSA key types are supported for SAML signing."
msgstr ""
"Seuls les types de clés RSA, EC et DSA sont pris en charge pour la signature"
" SAML."
#: authentik/providers/saml/api/providers.py
msgid "Invalid XML Syntax"
@@ -2997,8 +2942,6 @@ msgstr "Salesforce"
#: authentik/providers/scim/models.py
msgid "Group filters used to define sync-scope for groups."
msgstr ""
"Les filtres de groupe utilisés pour définir le périmètre de synchronisation "
"des groupes."
#: authentik/providers/scim/models.py
msgid "Base URL to SCIM requests, usually ends in /v2"
@@ -3071,13 +3014,11 @@ msgstr ""
#: authentik/providers/scim/tasks.py
msgid "Delete an object (user, group) for SCIM provider."
msgstr "Supprime un objet (utilisateur, groupe) pour le fournisseur SCIM."
msgstr ""
#: authentik/providers/scim/tasks.py
msgid "Dispatch deletions for an object (user, group) for SCIM providers."
msgstr ""
"Déclenche les suppressions pour un objet (utilisateur, groupe) pour les "
"fournisseurs SCIM."
#: authentik/providers/scim/tasks.py
msgid "Sync a related object (memberships) for SCIM provider."
@@ -4028,8 +3969,8 @@ msgstr "Courriel invalide"
#: authentik/stages/authenticator_email/stage.py
msgid "The user already has an email address registered for MFA."
msgstr ""
"L'utilisateur a déjà une adresse de courriel enregistrée pour "
"l'authentification multi-facteurs."
"L'utilisateur a déjà une adresse e-mail enregistrée pour l'authentification "
"multi-facteurs."
#: authentik/stages/authenticator_email/templates/email/email_otp.html
#: authentik/stages/email/templates/email/password_reset.html
@@ -4061,7 +4002,7 @@ msgid ""
" "
msgstr ""
"\n"
" Si vous n'avez pas demandé ce code, veuillez ignorer ce courriel. Le code ci-dessus est valide pendant %(expires)s.\n"
" Si vous n'avez pas demandé ce code, veuillez ignorer ce courriel. Le code ci-dessus est valid pendant %(expires)s.\n"
" "
#: authentik/stages/authenticator_email/templates/email/email_otp.txt
@@ -4076,7 +4017,7 @@ msgid ""
"Email MFA code\n"
msgstr ""
"\n"
"Envoyer le code MFA par courriel\n"
"Code MFA envoyé par e-mail\n"
#: authentik/stages/authenticator_email/templates/email/email_otp.txt
#, python-format
@@ -4389,10 +4330,6 @@ msgstr "OTP Courriel"
msgid "Event Notification"
msgstr "Notification d'évènement"
#: authentik/stages/email/models.py authentik/stages/invitation/models.py
msgid "Invitation"
msgstr "Invitation"
#: authentik/stages/email/models.py
msgid ""
"The time window used to count recent account recovery attempts. If the "
@@ -4410,15 +4347,15 @@ msgstr "Activer les utilisateurs à la complétion de l'étape."
#: authentik/stages/email/models.py
msgid "Email Stage"
msgstr "Étape Courriel"
msgstr "Étape Email"
#: authentik/stages/email/models.py
msgid "Email Stages"
msgstr "Étapes Courriel"
msgstr "Étape Email"
#: authentik/stages/email/stage.py
msgid "Successfully verified Email."
msgstr "Courriel vérifié avec succès."
msgstr "Email vérifié avec succès."
#: authentik/stages/email/stage.py
msgid "No pending user."
@@ -4426,7 +4363,7 @@ msgstr "Pas d'utilisateurs en attente."
#: authentik/stages/email/stage.py
msgid "Email sent."
msgstr "Courriel envoyé."
msgstr "Email envoyé."
#: authentik/stages/email/stage.py
#, python-brace-format
@@ -4489,7 +4426,7 @@ msgid ""
" "
msgstr ""
"\n"
" Ce courriel a été envoyé depuis le transport de notification <code>%(name)s</code>.\n"
" Cet email a été envoyé depuis le transport de notification <code>%(name)s</code>.\n"
" "
#: authentik/stages/email/templates/email/event_notification.txt
@@ -4513,78 +4450,6 @@ msgstr ""
"\n"
"Cet email a été envoyé depuis le transport de notification %(name)s.\n"
#: authentik/stages/email/templates/email/invitation.html
msgid ""
"\n"
" You're Invited!\n"
" "
msgstr ""
"\n"
" Vous êtes invité !\n"
" "
#: authentik/stages/email/templates/email/invitation.html
#, python-format
msgid ""
"\n"
" You have been invited to join %(host)s. Click the button below to get started.\n"
" "
msgstr ""
"\n"
" Vous avez été invité à rejoindre %(host)s. Cliquez sur le bouton ci-dessous pour démarrer.\n"
" "
#: authentik/stages/email/templates/email/invitation.html
#, python-format
msgid ""
"\n"
" This invitation expires %(expires)s.\n"
" "
msgstr ""
"\n"
" Cette invitation expire le %(expires)s.\n"
" "
#: authentik/stages/email/templates/email/invitation.html
#: authentik/stages/email/templates/email/invitation.txt
msgid "Accept Invitation"
msgstr "Accepter l'invitation"
#: authentik/stages/email/templates/email/invitation.html
msgid ""
"\n"
" If you cannot click the button above, please copy and paste the following URL into your browser:\n"
" "
msgstr ""
"\n"
" Si cela ne fonctionne pas, copier et coller ce lien dans votre navigateur :\n"
" "
#: authentik/stages/email/templates/email/invitation.txt
msgid "You're Invited!"
msgstr "Vous êtes invité !"
#: authentik/stages/email/templates/email/invitation.txt
#, python-format
msgid ""
"You have been invited to join %(host)s. Use the link below to get started."
msgstr ""
"Vous avez été invité à rejoindre %(host)s. Utilisez le lien ci-dessous pour "
"démarrer."
#: authentik/stages/email/templates/email/invitation.txt
#, python-format
msgid "This invitation expires %(expires)s."
msgstr "Cette invitation expire le %(expires)s."
#: authentik/stages/email/templates/email/invitation.txt
msgid ""
"If you cannot click the link above, please copy and paste the following URL "
"into your browser:"
msgstr ""
"Si vous ne pouvez pas cliquer sur le lien au-dessus, merci de copier et "
"coller l'URL suivante dans votre navigateur :"
#: authentik/stages/email/templates/email/password_reset.html
msgid ""
"\n"
@@ -4603,7 +4468,7 @@ msgid ""
" "
msgstr ""
"\n"
" Si vous n'avez pas demandé de changement de mot de passe, veuillez ignorer ce courriel. Le lien ci-dessus est valide pendant %(expires)s.\n"
" Si vous n'avez pas requis de changement de mot de passe, veuillez ignorer cet e-mail. Le lien ci-dessus est valide pendant %(expires)s.\n"
" "
#: authentik/stages/email/templates/email/password_reset.txt
@@ -4621,11 +4486,11 @@ msgid ""
"If you did not request a password change, please ignore this email. The link above is valid for %(expires)s.\n"
msgstr ""
"\n"
"Si vous n'avez pas demandé de changement de mot de passe, veuillez ignorer ce courriel. Le lien ci-dessus est valide pendant %(expires)s.\n"
"Si vous n'avez pas requis de changement de mot de passe, veuillez ignorer cet e-mail. Le lien ci-dessus est valide pendant %(expires)s.\n"
#: authentik/stages/email/templates/email/setup.html
msgid "authentik Test-Email"
msgstr "Courriel de Test d'authentik"
msgstr "Email de Test d'authentik"
#: authentik/stages/email/templates/email/setup.html
msgid ""
@@ -4634,7 +4499,7 @@ msgid ""
" "
msgstr ""
"\n"
"Ceci est un courriel de test pour vous informer que vous avez configuré les courriels d'authentik avec succès."
"Ceci est un email de test pour vous informer que vous avez configuré les emails d'authentik avec succès."
#: authentik/stages/email/templates/email/setup.txt
msgid ""
@@ -4642,7 +4507,7 @@ msgid ""
"This is a test email to inform you, that you've successfully configured authentik emails.\n"
msgstr ""
"\n"
"Ceci est un courriel de test pour vous informer que vous avez configuré les courriels d'authentik avec succès.\n"
"Ceci est un email de test pour vous informer que vous avez configuré les emails d'authentik avec succès.\n"
#: authentik/stages/identification/api.py
msgid "When no user fields are selected, at least one source must be selected"
@@ -4670,7 +4535,7 @@ msgid ""
"the user's username and avatar will be shown. Otherwise, the text that the "
"user entered will be shown"
msgstr ""
"Lorsqu'un nom d'utilisateur/courriel valide a été saisi, et si cette option "
"Lorsqu'un nom d'utilisateur/e-mail valide a été saisi, et si cette option "
"est active, le nom d'utilisateur et l'avatar de l'utilisateur seront "
"affichés. Sinon, le texte que l'utilisateur a saisi sera affiché."
@@ -4718,7 +4583,7 @@ msgstr "Étapes d'identification"
#: authentik/stages/identification/stage.py
msgid "No identification data provided."
msgstr "Aucune donnée d'identification fournie."
msgstr ""
#: authentik/stages/identification/stage.py
msgid "Failed to authenticate."
@@ -4763,6 +4628,10 @@ msgstr ""
"Données statiques optionnelles à forcer lors de l'inscription des "
"utilisateurs."
#: authentik/stages/invitation/models.py
msgid "Invitation"
msgstr "Invitation"
#: authentik/stages/invitation/models.py
msgid "Invitations"
msgstr "Invitations"
@@ -4848,7 +4717,7 @@ msgstr ""
#: authentik/stages/prompt/models.py
msgid "Email: Text field with Email type."
msgstr "Courriel : champ texte de type Courriel"
msgstr "Courriel : champ texte de type email"
#: authentik/stages/prompt/models.py
msgid ""
@@ -5088,7 +4957,7 @@ msgstr "Déclencher manuellement une planification"
#: authentik/tasks/tasks.py
msgid "Remove old worker statuses."
msgstr "Supprime les anciens états des workers."
msgstr "Supprime les anciens statuts des workers."
#: authentik/tenants/api/settings.py
#, python-brace-format
@@ -5115,7 +4984,7 @@ msgstr "Activer la possibilité aux utilisateurs de changer leur nom."
#: authentik/tenants/models.py
msgid "Enable the ability for users to change their email address."
msgstr ""
"Activer la possibilité aux utilisateurs de changer leur adresse de courriel."
"Activer la possibilité aux utilisateurs de changer leur adresse email."
#: authentik/tenants/models.py
msgid "Enable the ability for users to change their username."
@@ -5176,12 +5045,10 @@ msgstr "Longueur par défaut des jetons"
#: authentik/tenants/models.py
msgid "Default page size for API responses, if no size was requested."
msgstr ""
"Taille de page par défaut pour les réponses API, si aucune taille n'a été "
"demandée."
#: authentik/tenants/models.py
msgid "Maximum page size"
msgstr "Taille maximale de la page"
msgstr ""
#: authentik/tenants/models.py
msgid "Tenant"


@@ -8,12 +8,10 @@ from django.utils.autoreload import DJANGO_AUTORELOAD_ENV
from authentik.root.setup import setup
from lifecycle.migrate import run_migrations
from lifecycle.wait_for_db import wait_for_db
setup()
if __name__ == "__main__":
wait_for_db()
if (
len(sys.argv) > 1
# Explicitly only run migrate for server and worker

package-lock.json generated

@@ -3389,9 +3389,9 @@
}
},
"node_modules/flatted": {
"version": "3.4.2",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz",
"integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==",
"version": "3.4.1",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.1.tgz",
"integrity": "sha512-IxfVbRFVlV8V/yRaGzk0UVIcsKKHMSfYw66T/u4nTwlWteQePsxe//LjudR1AMX4tZW3WFCh3Zqa/sjlqpbURQ==",
"license": "ISC"
},
"node_modules/for-each": {


@@ -76,7 +76,7 @@ dependencies = [
[dependency-groups]
dev = [
"aws-cdk-lib==2.244.0",
"aws-cdk-lib==2.243.0",
"bandit==1.9.4",
"black==26.3.1",
"bpython==0.26",
@@ -101,12 +101,12 @@ dev = [
"pytest-timeout==2.4.0",
"pytest==9.0.2",
"requests-mock==1.12.1",
"ruff==0.15.7",
"ruff==0.15.6",
"selenium==4.41.0",
"types-channels==4.3.0.20260321",
"types-docker==7.1.0.20260322",
"types-channels==4.3.0.20250822",
"types-docker==7.1.0.20260109",
"types-jwcrypto==1.5.0.20251102",
"types-ldap3==2.9.13.20260319",
"types-ldap3==2.9.13.20251121",
"types-requests==2.32.4.20260107",
"types-zxcvbn==4.5.0.20250809",
]


@@ -2733,8 +2733,12 @@ paths:
/core/applications/:
get:
operationId: core_applications_list
description: Application Viewset
description: Custom list method that checks Policy based access instead of guardian
parameters:
- in: query
name: for_user
schema:
type: integer
- in: query
name: group
schema:
@@ -2752,6 +2756,10 @@ paths:
schema:
type: string
- $ref: '#/components/parameters/QueryName'
- in: query
name: only_with_launch_url
schema:
type: boolean
- $ref: '#/components/parameters/QueryPaginationOrdering'
- $ref: '#/components/parameters/QueryPaginationPage'
- $ref: '#/components/parameters/QueryPaginationPageSize'
@@ -2760,6 +2768,10 @@ paths:
name: slug
schema:
type: string
- in: query
name: superuser_full_list
schema:
type: boolean
tags:
- core
security:
@@ -2965,59 +2977,6 @@ paths:
$ref: '#/components/responses/ValidationErrorResponse'
'403':
$ref: '#/components/responses/GenericErrorResponse'
/core/applications/@accessible/:
get:
operationId: core_applications_accessible_list
description: Get applications accessible for user
parameters:
- in: query
name: for_user
schema:
type: integer
- in: query
name: group
schema:
type: string
- in: query
name: meta_description
schema:
type: string
- in: query
name: meta_launch_url
schema:
type: string
- in: query
name: meta_publisher
schema:
type: string
- $ref: '#/components/parameters/QueryName'
- in: query
name: only_with_launch_url
schema:
type: boolean
- $ref: '#/components/parameters/QueryPaginationOrdering'
- $ref: '#/components/parameters/QueryPaginationPage'
- $ref: '#/components/parameters/QueryPaginationPageSize'
- $ref: '#/components/parameters/QuerySearch'
- in: query
name: slug
schema:
type: string
tags:
- core
security:
- authentik: []
responses:
'200':
content:
application/json:
schema:
$ref: '#/components/schemas/PaginatedApplicationList'
description: ''
'400':
$ref: '#/components/responses/ValidationErrorResponse'
'403':
$ref: '#/components/responses/GenericErrorResponse'
/core/authenticated_sessions/:
get:
operationId: core_authenticated_sessions_list
@@ -37041,23 +37000,13 @@ components:
flags:
type: object
properties:
core_default_app_access:
type: boolean
description: Configure if applications without any policy/group/user
bindings should be accessible to any user.
enterprise_audit_include_expanded_diff:
type: boolean
description: Include additional information in audit logs, may incur
a performance penalty.
flows_continuous_login:
type: boolean
description: Upon successful authentication, re-start authentication
in other open tabs.
flows_refresh_others:
type: boolean
description: Refresh other tabs after successful authentication.
required:
- core_default_app_access
- enterprise_audit_include_expanded_diff
- flows_continuous_login
- flows_refresh_others
@@ -50825,23 +50774,13 @@ components:
flags:
type: object
properties:
core_default_app_access:
type: boolean
description: Configure if applications without any policy/group/user
bindings should be accessible to any user.
enterprise_audit_include_expanded_diff:
type: boolean
description: Include additional information in audit logs, may incur
a performance penalty.
flows_continuous_login:
type: boolean
description: Upon successful authentication, re-start authentication
in other open tabs.
flows_refresh_others:
type: boolean
description: Refresh other tabs after successful authentication.
required:
- core_default_app_access
- enterprise_audit_include_expanded_diff
- flows_continuous_login
- flows_refresh_others
@@ -55534,23 +55473,13 @@ components:
flags:
type: object
properties:
core_default_app_access:
type: boolean
description: Configure if applications without any policy/group/user
bindings should be accessible to any user.
enterprise_audit_include_expanded_diff:
type: boolean
description: Include additional information in audit logs, may incur
a performance penalty.
flows_continuous_login:
type: boolean
description: Upon successful authentication, re-start authentication
in other open tabs.
flows_refresh_others:
type: boolean
description: Refresh other tabs after successful authentication.
required:
- core_default_app_access
- enterprise_audit_include_expanded_diff
- flows_continuous_login
- flows_refresh_others
@@ -55622,23 +55551,13 @@ components:
flags:
type: object
properties:
core_default_app_access:
type: boolean
description: Configure if applications without any policy/group/user
bindings should be accessible to any user.
enterprise_audit_include_expanded_diff:
type: boolean
description: Include additional information in audit logs, may incur
a performance penalty.
flows_continuous_login:
type: boolean
description: Upon successful authentication, re-start authentication
in other open tabs.
flows_refresh_others:
type: boolean
description: Refresh other tabs after successful authentication.
required:
- core_default_app_access
- enterprise_audit_include_expanded_diff
- flows_continuous_login
- flows_refresh_others

src/arbiter.rs Normal file

@@ -0,0 +1,291 @@
//! Utilities to manage long-running tasks, such as servers and watchers.
//!
//! Also manages signals sent to the main process.
use std::{net, os::unix, sync::Arc, time::Duration};
use axum_server::Handle;
use eyre::{Report, Result};
use tokio::{
signal::unix::{Signal, SignalKind, signal},
sync::{Mutex, broadcast, watch},
task::{JoinSet, join_set::Builder},
};
use tokio_util::sync::{CancellationToken, WaitForCancellationFuture};
use tracing::info;
/// All the signal streams we watch for. We don't create those directly in [`watch_signals`]
/// because that would prevent us from handling errors early.
struct SignalStreams {
hup: Signal,
int: Signal,
quit: Signal,
usr1: Signal,
usr2: Signal,
term: Signal,
}
impl SignalStreams {
fn new() -> Result<Self> {
Ok(Self {
hup: signal(SignalKind::hangup())?,
int: signal(SignalKind::interrupt())?,
quit: signal(SignalKind::quit())?,
usr1: signal(SignalKind::user_defined1())?,
usr2: signal(SignalKind::user_defined2())?,
term: signal(SignalKind::terminate())?,
})
}
}
/// Watch for incoming signals and either shut down the application or dispatch them to receivers.
async fn watch_signals(
streams: SignalStreams,
arbiter: Arbiter,
_signals_rx: broadcast::Receiver<SignalKind>,
) -> Result<()> {
info!("starting signals watcher");
let SignalStreams {
mut hup,
mut int,
mut quit,
mut usr1,
mut usr2,
mut term,
} = streams;
loop {
tokio::select! {
_ = hup.recv() => {
info!("signal HUP received");
arbiter.do_fast_shutdown().await;
},
_ = int.recv() => {
info!("signal INT received");
arbiter.do_fast_shutdown().await;
},
_ = quit.recv() => {
info!("signal QUIT received");
arbiter.do_fast_shutdown().await;
},
_ = usr1.recv() => {
info!("signal URS1 received");
arbiter.signals_tx.send(SignalKind::user_defined1())?;
},
_ = usr2.recv() => {
info!("USR2 received.");
arbiter.signals_tx.send(SignalKind::user_defined2())?;
},
_ = term.recv() => {
info!("signal TERM received");
arbiter.do_graceful_shutdown().await;
},
() = arbiter.shutdown() => {
info!("stopping signals watcher");
return Ok(());
}
};
}
}
/// Manager for long-running tasks, such as servers and watchers.
pub(crate) struct Tasks {
pub(crate) tasks: JoinSet<Result<()>>,
arbiter: Arbiter,
}
impl Tasks {
pub(crate) fn new() -> Result<Self> {
let mut tasks = JoinSet::new();
let arbiter = Arbiter::new(&mut tasks)?;
Ok(Self { tasks, arbiter })
}
/// Build a new task. See [`tokio::task::JoinSet::build_task`] for details.
pub(crate) fn build_task(&mut self) -> Builder<'_, Result<()>> {
self.tasks.build_task()
}
/// Get an [`Arbiter`]
pub(crate) fn arbiter(&self) -> Arbiter {
self.arbiter.clone()
}
pub(crate) async fn run(self) -> Vec<Report> {
let Self { mut tasks, arbiter } = self;
let mut errors = Vec::new();
if let Some(result) = tasks.join_next().await {
arbiter.do_graceful_shutdown().await;
match result {
Ok(Ok(())) => {}
Ok(Err(err)) => {
arbiter.do_fast_shutdown().await;
errors.push(err);
}
Err(err) => {
arbiter.do_fast_shutdown().await;
errors.push(Report::new(err));
}
}
while let Some(result) = tasks.join_next().await {
match result {
Ok(Ok(())) => {}
Ok(Err(err)) => errors.push(err),
Err(err) => errors.push(Report::new(err)),
}
}
}
errors
}
}
/// Manage shutdown state and several communication channels.
#[derive(Clone)]
pub(crate) struct Arbiter {
/// Token to shutdown the application immediately.
fast_shutdown: CancellationToken,
/// Token to shutdown the application gracefully.
graceful_shutdown: CancellationToken,
/// Token set when any shutdown is triggered.
shutdown: CancellationToken,
/// Axum handles to manage
net_handles: Arc<Mutex<Vec<Handle<net::SocketAddr>>>>,
unix_handles: Arc<Mutex<Vec<Handle<unix::net::SocketAddr>>>>,
/// Broadcaster of signals sent to the main process.
signals_tx: broadcast::Sender<SignalKind>,
/// Watcher of config change events
config_changed_tx: watch::Sender<()>,
_config_changed_rx: watch::Receiver<()>,
/// Token set when gunicorn is marked ready
gunicorn_ready: CancellationToken,
}
impl Arbiter {
fn new(tasks: &mut JoinSet<Result<()>>) -> Result<Self> {
let (signals_tx, signals_rx) = broadcast::channel(10);
let (config_changed_tx, config_changed_rx) = watch::channel(());
let arbiter = Self {
fast_shutdown: CancellationToken::new(),
graceful_shutdown: CancellationToken::new(),
shutdown: CancellationToken::new(),
// 5 is http, https, metrics and a bit of room
net_handles: Arc::new(Mutex::new(Vec::with_capacity(5))),
// 2 is http and metrics
unix_handles: Arc::new(Mutex::new(Vec::with_capacity(2))),
signals_tx,
config_changed_tx,
_config_changed_rx: config_changed_rx,
gunicorn_ready: CancellationToken::new(),
};
let streams = SignalStreams::new()?;
tasks
.build_task()
.name(&format!("{}::watch_signals", module_path!()))
.spawn(watch_signals(streams, arbiter.clone(), signals_rx))?;
Ok(arbiter)
}
pub(crate) async fn add_net_handle(&self, handle: Handle<net::SocketAddr>) {
self.net_handles.lock().await.push(handle);
}
pub(crate) async fn add_unix_handle(&self, handle: Handle<unix::net::SocketAddr>) {
self.unix_handles.lock().await.push(handle);
}
/// Future that will complete when the application needs to shut down immediately.
pub(crate) fn fast_shutdown(&self) -> WaitForCancellationFuture<'_> {
self.fast_shutdown.cancelled()
}
/// Future that will complete when the application needs to shutdown gracefully.
pub(crate) fn graceful_shutdown(&self) -> WaitForCancellationFuture<'_> {
self.graceful_shutdown.cancelled()
}
/// Future that will complete when the application needs to shut down, either immediately or
/// gracefully. It's a helper so callers that don't distinguish between immediate and graceful
/// shutdown don't need to handle both scenarios.
pub(crate) fn shutdown(&self) -> WaitForCancellationFuture<'_> {
self.shutdown.cancelled()
}
/// Shut down the application immediately.
async fn do_fast_shutdown(&self) {
info!("arbiter has been told to shutdown immediately");
self.unix_handles
.lock()
.await
.iter()
.for_each(Handle::shutdown);
self.net_handles
.lock()
.await
.iter()
.for_each(Handle::shutdown);
info!("all webservers have been shutdown, shutting down the other tasks immediately");
self.fast_shutdown.cancel();
self.shutdown.cancel();
}
/// Shut down the application gracefully.
async fn do_graceful_shutdown(&self) {
info!("arbiter has been told to shutdown gracefully");
// Match the value in lifecycle/gunicorn.conf.py for graceful shutdown
let timeout = Some(Duration::from_secs(30 + 5));
self.unix_handles
.lock()
.await
.iter()
.for_each(|handle| handle.graceful_shutdown(timeout));
self.net_handles
.lock()
.await
.iter()
.for_each(|handle| handle.graceful_shutdown(timeout));
info!("all webservers have been shutdown, shutting down the other tasks gracefully");
self.graceful_shutdown.cancel();
self.shutdown.cancel();
}
/// Create a new [`broadcast::Receiver`] to listen for signals sent to the main process. This
/// may not include all signals we catch, since some of those will shut down the application.
pub(crate) fn signals_subscribe(&self) -> broadcast::Receiver<SignalKind> {
self.signals_tx.subscribe()
}
/// Send a value on the config changes watch channel
pub(crate) fn config_changed_send(&self, value: ()) -> Result<()> {
self.config_changed_tx.send(value)?;
Ok(())
}
/// Create a new [`watch::Receiver`] to listen for detected configuration changes.
pub(crate) fn config_changed_subscribe(&self) -> watch::Receiver<()> {
self.config_changed_tx.subscribe()
}
/// Future that will complete when gunicorn has been marked ready.
pub(crate) fn gunicorn_ready(&self) -> WaitForCancellationFuture<'_> {
self.gunicorn_ready.cancelled()
}
/// Mark gunicorn as ready
pub(crate) fn mark_gunicorn_ready(&self) {
self.gunicorn_ready.cancel();
}
}
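To make the ownership model above concrete, here is a minimal sketch of how a long-running task could be registered with `Tasks` and react to the `Arbiter` shutdown tokens. Only `Tasks::new`, `build_task`, `arbiter`, `shutdown` and `run` come from the file above; the periodic "tick" loop, the task name and the caller are made up, and the code assumes it lives inside this crate so the `pub(crate)` items are visible.

```rust
use std::time::Duration;

use eyre::Result;

// Hypothetical entry point showing the intended usage pattern.
async fn run_app() -> Result<()> {
    let mut tasks = Tasks::new()?;
    let arbiter = tasks.arbiter();
    tasks
        .build_task()
        .name("example::tick")
        .spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(5));
            loop {
                tokio::select! {
                    _ = interval.tick() => { /* periodic work would go here */ }
                    // Resolves on either a fast or a graceful shutdown.
                    () = arbiter.shutdown() => return Ok(()),
                }
            }
        })?;
    // run() waits for the first task to finish, triggers a shutdown, then drains the rest.
    for err in tasks.run().await {
        tracing::error!("task failed: {err:?}");
    }
    Ok(())
}
```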

src/axum/accept/mod.rs Normal file

@@ -0,0 +1,2 @@
pub(crate) mod proxy_protocol;
pub(crate) mod tls;


@@ -0,0 +1,86 @@
use std::{io, time::Duration};
use axum::{Extension, middleware::AddExtension};
use axum_server::accept::{Accept, DefaultAcceptor};
use futures::future::BoxFuture;
use tokio::io::{AsyncRead, AsyncWrite};
use tower::Layer as _;
use tracing::instrument;
use crate::tokio::proxy_protocol::{ProxyProtocolStream, header::Header};
#[derive(Clone, Debug)]
pub(crate) struct ProxyProtocolState {
pub(crate) header: Option<Header<'static>>,
}
#[derive(Clone)]
pub(crate) struct ProxyProtocolAcceptor<A = DefaultAcceptor> {
inner: A,
parsing_timeout: Duration,
}
impl ProxyProtocolAcceptor {
pub(crate) fn new() -> Self {
let inner = DefaultAcceptor::new();
#[cfg(not(test))]
let parsing_timeout = Duration::from_secs(10);
// Don't force tests to wait too long
#[cfg(test)]
let parsing_timeout = Duration::from_secs(1);
Self {
inner,
parsing_timeout,
}
}
}
impl Default for ProxyProtocolAcceptor {
fn default() -> Self {
Self::new()
}
}
impl<A> ProxyProtocolAcceptor<A> {
pub(crate) fn acceptor<Acceptor>(self, acceptor: Acceptor) -> ProxyProtocolAcceptor<Acceptor> {
ProxyProtocolAcceptor {
inner: acceptor,
parsing_timeout: self.parsing_timeout,
}
}
}
impl<A, I, S> Accept<I, S> for ProxyProtocolAcceptor<A>
where
A: Accept<I, S> + Clone + Send + 'static,
A::Stream: AsyncRead + AsyncWrite + Unpin + Send,
A::Service: Send,
A::Future: Send,
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: Send + 'static,
{
type Future = BoxFuture<'static, io::Result<(Self::Stream, Self::Service)>>;
type Service = AddExtension<A::Service, ProxyProtocolState>;
type Stream = ProxyProtocolStream<A::Stream>;
#[instrument(skip_all)]
fn accept(&self, stream: I, service: S) -> Self::Future {
let acceptor = self.inner.clone();
Box::pin(async move {
let (stream, service) = acceptor.accept(stream, service).await?;
let stream = ProxyProtocolStream::new(stream).await?;
let proxy_protocol_state = ProxyProtocolState {
header: stream.header().cloned(),
};
let service = Extension(proxy_protocol_state).layer(service);
Ok((stream, service))
})
}
}
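As a rough illustration (not part of the diff), once this acceptor has attached `ProxyProtocolState` to a connection, a handler or middleware can read the parsed header from the request extensions, mirroring the let-chain pattern used later in `client_ip.rs`. The handler below is hypothetical and assumes the route is served behind `ProxyProtocolAcceptor`; otherwise the extension is absent and the extractor rejects.

```rust
use axum::Extension;

use crate::axum::accept::proxy_protocol::ProxyProtocolState;

// Hypothetical handler: report the proxied source address if a PROXY protocol header was parsed.
async fn proxy_info(Extension(state): Extension<ProxyProtocolState>) -> String {
    if let Some(header) = &state.header
        && let Some(addr) = header.proxied_address()
    {
        return format!("proxied from {}", addr.source.ip());
    }
    "no PROXY protocol header".to_owned()
}
```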

src/axum/accept/tls.rs Normal file

@@ -0,0 +1,54 @@
use axum::{Extension, middleware::AddExtension};
use axum_server::{accept::Accept, tls_rustls::RustlsAcceptor};
use futures::future::BoxFuture;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_rustls::{rustls::pki_types::CertificateDer, server::TlsStream};
use tower::Layer as _;
use tracing::instrument;
#[derive(Clone, Debug)]
pub(crate) struct TlsState {
pub(crate) peer_certificates: Option<Vec<CertificateDer<'static>>>,
}
#[derive(Clone)]
pub(crate) struct TlsAcceptor<A> {
inner: RustlsAcceptor<A>,
}
impl<A> TlsAcceptor<A> {
pub(crate) fn new(inner: RustlsAcceptor<A>) -> Self {
Self { inner }
}
}
impl<A, I, S> Accept<I, S> for TlsAcceptor<A>
where
A: Accept<I, S> + Clone + Send + 'static,
A::Stream: AsyncRead + AsyncWrite + Unpin + Send,
A::Service: Send,
A::Future: Send,
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: Send + 'static,
{
type Future = BoxFuture<'static, std::io::Result<(Self::Stream, Self::Service)>>;
type Service = AddExtension<A::Service, TlsState>;
type Stream = TlsStream<A::Stream>;
#[instrument(skip_all)]
fn accept(&self, stream: I, service: S) -> Self::Future {
let acceptor = self.inner.clone();
Box::pin(async move {
let (stream, service) = acceptor.accept(stream, service).await?;
let server_conn = stream.get_ref().1;
let tls_state = TlsState {
peer_certificates: server_conn.peer_certificates().map(|c| c.to_owned()),
};
let service = Extension(tls_state).layer(service);
Ok((stream, service))
})
}
}

src/axum/error.rs Normal file

@@ -0,0 +1,26 @@
use axum::{
http::StatusCode,
response::{IntoResponse, Response},
};
use eyre::Report;
use tracing::warn;
#[derive(Debug)]
pub(crate) struct AppError(pub(crate) Report);
impl<E> From<E> for AppError
where E: Into<Report>
{
fn from(err: E) -> Self {
Self(err.into())
}
}
impl IntoResponse for AppError {
fn into_response(self) -> Response {
warn!("error occurred: {:?}", self.0);
(StatusCode::INTERNAL_SERVER_ERROR, "Something went wrong").into_response()
}
}
pub(crate) type Result<T, E = AppError> = core::result::Result<T, E>;
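A small, hypothetical handler showing the intent of `AppError`: anything convertible into an `eyre::Report` can be bubbled up with `?`, and the blanket `From` impl turns it into a logged 500 response. The file path is made up, and tokio's `fs` feature is assumed to be enabled.

```rust
use crate::axum::error::Result;

// Hypothetical handler: the io::Error from the read converts into AppError via `?`.
async fn uptime() -> Result<String> {
    let raw = tokio::fs::read_to_string("/proc/uptime").await?;
    Ok(raw.trim().to_owned())
}
```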


@@ -0,0 +1,228 @@
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
use axum::{
Extension, RequestPartsExt as _,
extract::{ConnectInfo, FromRequestParts, Request},
http::request::Parts,
middleware::Next,
response::Response,
};
use tracing::{Span, instrument};
use crate::axum::{
accept::proxy_protocol::ProxyProtocolState, extract::trusted_proxy::TrustedProxy,
};
#[derive(Clone, Copy, Debug)]
pub(crate) struct ClientIp(pub IpAddr);
impl<S> FromRequestParts<S> for ClientIp
where S: Send + Sync
{
type Rejection = <Extension<Self> as FromRequestParts<S>>::Rejection;
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
Extension::<Self>::from_request_parts(parts, state)
.await
.map(|Extension(client_ip)| client_ip)
}
}
#[instrument(skip_all)]
async fn extract_client_ip(parts: &mut Parts) -> IpAddr {
let is_trusted = parts
.extract::<TrustedProxy>()
.await
.unwrap_or(TrustedProxy(false))
.0;
if is_trusted {
if let Ok(ip) = client_ip::rightmost_x_forwarded_for(&parts.headers) {
return ip;
}
if let Ok(ip) = client_ip::x_real_ip(&parts.headers) {
return ip;
}
if let Ok(ip) = client_ip::rightmost_forwarded(&parts.headers) {
return ip;
}
if let Ok(Extension(proxy_protocol_state)) =
parts.extract::<Extension<ProxyProtocolState>>().await
&& let Some(header) = &proxy_protocol_state.header
&& let Some(addr) = header.proxied_address()
{
return addr.source.ip();
}
}
if let Ok(ConnectInfo(addr)) = parts.extract::<ConnectInfo<SocketAddr>>().await {
addr.ip()
} else {
// No connect info means we received a request via a Unix socket, hence localhost
// as default
Ipv6Addr::LOCALHOST.into()
}
}
pub(crate) async fn client_ip_middleware(request: Request, next: Next) -> Response {
let (mut parts, body) = request.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
Span::current().record("remote", client_ip.to_string());
parts.extensions.insert::<ClientIp>(ClientIp(client_ip));
let request = Request::from_parts(parts, body);
next.run(request).await
}
#[cfg(test)]
mod tests {
use std::net::Ipv4Addr;
use axum::{body::Body, http::Request};
use super::*;
#[tokio::test]
async fn x_forwarded_for_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-for", "192.0.2.51, 192.0.2.42")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv4Addr::new(192, 0, 2, 42),);
}
#[tokio::test]
async fn x_real_ip_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-real-ip", "192.0.2.42")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv4Addr::new(192, 0, 2, 42),);
}
#[tokio::test]
async fn forwarded_header_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("forwarded", "for=192.0.2.42")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv4Addr::new(192, 0, 2, 42),);
}
#[tokio::test]
async fn from_connect_info() {
let connect_addr: SocketAddr = "192.0.2.42:34932"
.parse()
.expect("Failed to parse socket address");
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.extension(ConnectInfo(connect_addr))
.extension(TrustedProxy(false))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv4Addr::new(192, 0, 2, 42),);
}
#[tokio::test]
async fn headers_untrusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-for", "192.0.2.42")
.extension(TrustedProxy(false))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv6Addr::LOCALHOST);
}
#[tokio::test]
async fn priority_order() {
// Test that X-Forwarded-For takes priority over other headers when trusted
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-for", "192.0.2.1")
.header("x-real-ip", "192.0.2.2")
.header("forwarded", "for=192.0.2.3")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv4Addr::new(192, 0, 2, 1),);
}
#[tokio::test]
async fn no_ip_found() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv6Addr::LOCALHOST);
}
#[tokio::test]
async fn ipv6() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-for", "2001:db8::42")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0x42),);
}
#[tokio::test]
async fn multiple_x_forwarded_for() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-for", "192.0.2.1, 192.0.2.2, 192.0.2.3")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let client_ip = extract_client_ip(&mut parts).await;
assert_eq!(client_ip, Ipv4Addr::new(192, 0, 2, 3),);
}
}

src/axum/extract/host.rs Normal file

@@ -0,0 +1,262 @@
use axum::{
Extension, RequestPartsExt as _,
extract::{FromRequestParts, Request},
http::{
header::{FORWARDED, HOST},
request::Parts,
status::StatusCode,
},
middleware::Next,
response::{IntoResponse as _, Response},
};
use forwarded_header_value::ForwardedHeaderValue;
use tracing::{Span, instrument};
use crate::axum::extract::trusted_proxy::TrustedProxy;
const X_FORWARDED_HOST: &str = "X-Forwarded-Host";
#[derive(Clone, Debug)]
pub(crate) struct Host(pub String);
impl<S> FromRequestParts<S> for Host
where S: Send + Sync
{
type Rejection = <Extension<Self> as FromRequestParts<S>>::Rejection;
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
Extension::<Self>::from_request_parts(parts, state)
.await
.map(|Extension(host)| host)
}
}
#[instrument(skip_all)]
async fn extract_host(parts: &mut Parts) -> Result<String, (StatusCode, &'static str)> {
let is_trusted = parts
.extract::<TrustedProxy>()
.await
.unwrap_or(TrustedProxy(false))
.0;
if is_trusted {
if let Some(host) = parts
.headers
.get(X_FORWARDED_HOST)
.and_then(|host| host.to_str().ok())
{
return Ok(host.to_owned());
}
if let Some(forwarded) = parts.headers.get(FORWARDED)
&& let Ok(forwarded) = forwarded.to_str()
&& let Ok(forwarded) = ForwardedHeaderValue::from_forwarded(forwarded)
{
for stanza in forwarded.iter() {
if let Some(forwarded_host) = &stanza.forwarded_host {
return Ok(forwarded_host.to_owned());
}
}
}
}
if let Some(host) = parts.headers.get(HOST).and_then(|host| host.to_str().ok()) {
return Ok(host.to_owned());
}
if let Some(host) = parts.uri.host() {
Ok(host.to_owned())
} else {
Err((StatusCode::BAD_REQUEST, "missing host header"))
}
}
pub(crate) async fn host_middleware(request: Request, next: Next) -> Response {
let (mut parts, body) = request.into_parts();
let host = match extract_host(&mut parts).await {
Ok(host) => host,
Err(err) => return err.into_response(),
};
Span::current().record("host", host.clone());
parts.extensions.insert::<Host>(Host(host));
let request = Request::from_parts(parts, body);
next.run(request).await
}
#[cfg(test)]
mod tests {
use axum::{body::Body, http::Request};
use super::*;
#[tokio::test]
async fn host_header() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("host", "example.com:8080")
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"example.com:8080",
);
}
#[tokio::test]
async fn from_uri() {
let (mut parts, _) = Request::builder()
.uri("http://example.com:8080/path")
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"example.com",
);
}
#[tokio::test]
async fn x_forwarded_host_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-host", "forwarded.example.com")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"forwarded.example.com",
);
}
#[tokio::test]
async fn forwarded_header_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("forwarded", "host=forwarded.example.com")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"forwarded.example.com",
);
}
#[tokio::test]
async fn forwarded_host_untrusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-host", "malicious.example.com")
.extension(TrustedProxy(false))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"example.com",
);
}
#[tokio::test]
async fn forwarded_header_untrusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("forwarded", "host=malicious.example.com")
.extension(TrustedProxy(false))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"example.com",
);
}
#[tokio::test]
async fn priority_order() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-host", "x-forwarded.example.com")
.header("forwarded", "host=forwarded.example.com")
.header("host", "host-header.example.com")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"x-forwarded.example.com",
);
}
#[tokio::test]
async fn no_host_found() {
let (mut parts, _) = Request::builder()
.uri("/path")
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_err());
assert_eq!(result.expect_err("Host extract should fail").0, 400);
}
#[tokio::test]
async fn multiple_forwarded_stanzas() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header(
"forwarded",
"host=first.example.com, host=second.example.com",
)
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let result = extract_host(&mut parts).await;
assert!(result.is_ok());
assert_eq!(
result.expect("Host extraction should succeed"),
"first.example.com",
);
}
}

src/axum/extract/mod.rs Normal file

@@ -0,0 +1,4 @@
pub(crate) mod client_ip;
pub(crate) mod host;
pub(crate) mod scheme;
pub(crate) mod trusted_proxy;

src/axum/extract/scheme.rs Normal file

@@ -0,0 +1,241 @@
use axum::{
Extension, RequestPartsExt as _,
extract::{FromRequestParts, Request},
http::{self, header::FORWARDED, request::Parts},
middleware::Next,
response::Response,
};
use forwarded_header_value::{ForwardedHeaderValue, Protocol};
use tracing::{Span, instrument};
use crate::axum::{
accept::{proxy_protocol::ProxyProtocolState, tls::TlsState},
extract::trusted_proxy::TrustedProxy,
};
const X_FORWARDED_PROTO: &str = "X-Forwarded-Proto";
const X_FORWARDED_SCHEME: &str = "X-Forwarded-Scheme";
#[derive(Clone, Debug)]
pub(crate) struct Scheme(pub http::uri::Scheme);
impl<S> FromRequestParts<S> for Scheme
where S: Send + Sync
{
type Rejection = <Extension<Self> as FromRequestParts<S>>::Rejection;
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
Extension::<Self>::from_request_parts(parts, state)
.await
.map(|Extension(scheme)| scheme)
}
}
#[instrument(skip_all)]
async fn extract_scheme(parts: &mut Parts) -> http::uri::Scheme {
let is_trusted = parts
.extract::<TrustedProxy>()
.await
.unwrap_or(TrustedProxy(false))
.0;
if is_trusted {
if let Some(proto) = parts.headers.get(X_FORWARDED_PROTO)
&& let Ok(proto) = proto.to_str()
&& let Ok(scheme) = proto.to_lowercase().as_str().try_into()
{
return scheme;
}
if let Some(proto) = parts.headers.get(X_FORWARDED_SCHEME)
&& let Ok(proto) = proto.to_str()
&& let Ok(scheme) = proto.to_lowercase().as_str().try_into()
{
return scheme;
}
if let Some(forwarded) = parts.headers.get(FORWARDED)
&& let Ok(forwarded) = forwarded.to_str()
&& let Ok(forwarded) = ForwardedHeaderValue::from_forwarded(forwarded)
{
for stanza in forwarded.iter() {
if let Some(forwarded_proto) = &stanza.forwarded_proto {
let scheme = match forwarded_proto {
Protocol::Http => http::uri::Scheme::HTTP,
Protocol::Https => http::uri::Scheme::HTTPS,
};
return scheme;
}
}
}
if let Ok(Extension(proxy_protocol_state)) =
parts.extract::<Extension<ProxyProtocolState>>().await
&& let Some(header) = &proxy_protocol_state.header
&& let Some(_) = header.ssl()
{
return http::uri::Scheme::HTTPS;
}
}
if parts.extract::<Extension<TlsState>>().await.is_ok() {
http::uri::Scheme::HTTPS
} else {
http::uri::Scheme::HTTP
}
}
pub(crate) async fn scheme_middleware(request: Request, next: Next) -> Response {
let (mut parts, body) = request.into_parts();
let scheme = extract_scheme(&mut parts).await;
Span::current().record("scheme", scheme.to_string());
parts.extensions.insert::<Scheme>(Scheme(scheme));
let request = Request::from_parts(parts, body);
next.run(request).await
}
#[cfg(test)]
mod tests {
use axum::{body::Body, http::Request};
use super::*;
#[tokio::test]
async fn x_forwarded_proto_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-proto", "https")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTPS,);
}
#[tokio::test]
async fn x_forwarded_scheme_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-scheme", "https")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTPS,);
}
#[tokio::test]
async fn forwarded_header_trusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("forwarded", "proto=https")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTPS,);
}
#[tokio::test]
async fn x_forwarded_proto_untrusted() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-proto", "https")
.extension(TrustedProxy(false))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTP,);
}
#[tokio::test]
async fn scheme_from_tls_state() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.extension(TlsState {
peer_certificates: None,
})
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTPS,);
}
#[tokio::test]
async fn scheme_defaults_to_http() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTP,);
}
#[tokio::test]
async fn priority_order() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-proto", "http")
.header("x-forwarded-scheme", "https")
.header("forwarded", "proto=https")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTP,);
}
#[tokio::test]
async fn multiple_forwarded_stanzas() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("forwarded", "proto=http, proto=https")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTP,);
}
#[tokio::test]
async fn test_scheme_case_insensitive() {
let (mut parts, _) = Request::builder()
.uri("http://example.com/path")
.header("x-forwarded-proto", "HTTPS")
.extension(TrustedProxy(true))
.body(Body::empty())
.expect("Failed to create request")
.into_parts();
let scheme = extract_scheme(&mut parts).await;
assert_eq!(scheme, http::uri::Scheme::HTTPS,);
}
}


@@ -0,0 +1,59 @@
use std::net::SocketAddr;
use axum::{
Extension, RequestPartsExt as _,
extract::{ConnectInfo, FromRequestParts, Request},
http::request::Parts,
middleware::Next,
response::Response,
};
use tracing::{instrument, trace};
use crate::config;
#[derive(Clone, Copy, Debug)]
pub(crate) struct TrustedProxy(pub bool);
impl<S> FromRequestParts<S> for TrustedProxy
where S: Send + Sync
{
type Rejection = <Extension<Self> as FromRequestParts<S>>::Rejection;
async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
Extension::<Self>::from_request_parts(parts, state)
.await
.map(|Extension(trusted_proxy)| trusted_proxy)
}
}
#[instrument(skip_all)]
async fn extract_trusted_proxy(parts: &mut Parts) -> bool {
if let Ok(ConnectInfo(addr)) = parts.extract::<ConnectInfo<SocketAddr>>().await {
let trusted_proxy_cidrs = &config::get().listen.trusted_proxy_cidrs;
for trusted_net in trusted_proxy_cidrs {
if trusted_net.contains(&addr.ip()) {
trace!(
?addr,
?trusted_net,
"connection is now considered coming from a trusted proxy"
);
return true;
}
}
}
false
}
pub(crate) async fn trusted_proxy_middleware(request: Request, next: Next) -> Response {
let (mut parts, body) = request.into_parts();
let trusted_proxy = extract_trusted_proxy(&mut parts).await;
parts
.extensions
.insert::<TrustedProxy>(TrustedProxy(trusted_proxy));
let request = Request::from_parts(parts, body);
next.run(request).await
}

src/axum/mod.rs Normal file

@@ -0,0 +1,6 @@
pub(crate) mod accept;
pub(crate) mod error;
pub(crate) mod extract;
pub(crate) mod router;
pub(crate) mod server;
pub(crate) mod trace;

src/axum/router.rs Normal file

@@ -0,0 +1,39 @@
use axum::{Router, http::status::StatusCode, middleware::from_fn};
use tower::ServiceBuilder;
use tower_http::timeout::TimeoutLayer;
use crate::{
axum::{
extract::{
client_ip::client_ip_middleware, host::host_middleware, scheme::scheme_middleware,
trusted_proxy::trusted_proxy_middleware,
},
trace::{span_middleware, tracing_middleware},
},
config,
};
#[inline]
pub(crate) fn wrap_router(router: Router, with_trace: bool) -> Router {
let config = config::get();
let timeout = durstr::parse(&config.web.timeout_http_read_header)
.expect("Invalid duration in http timeout")
+ durstr::parse(&config.web.timeout_http_read).expect("Invalid duration in http timeout")
+ durstr::parse(&config.web.timeout_http_write).expect("Invalid duration in http timeout")
+ durstr::parse(&config.web.timeout_http_idle).expect("Invalid duration in http timeout");
let service_builder = ServiceBuilder::new()
.layer(TimeoutLayer::with_status_code(
StatusCode::REQUEST_TIMEOUT,
timeout,
))
.layer(from_fn(span_middleware))
.layer(from_fn(trusted_proxy_middleware))
.layer(from_fn(client_ip_middleware))
.layer(from_fn(scheme_middleware))
.layer(from_fn(host_middleware));
if with_trace {
router.layer(service_builder.layer(from_fn(tracing_middleware)))
} else {
router.layer(service_builder)
}
}
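For illustration, a hypothetical route wired through `wrap_router`: because the layered middlewares insert their extensions on every request, handlers can take the `ClientIp`, `Host` and `Scheme` extractors directly. The `/whoami` path and the handler itself are made up; only the extractors and `wrap_router` come from the files above.

```rust
use axum::{Router, routing::get};

use crate::axum::{
    extract::{client_ip::ClientIp, host::Host, scheme::Scheme},
    router::wrap_router,
};

// Hypothetical handler relying on the extension-backed extractors.
async fn whoami(ClientIp(ip): ClientIp, Host(host): Host, Scheme(scheme): Scheme) -> String {
    format!("{ip} via {scheme}://{host}")
}

fn make_router() -> Router {
    // `true` enables the request tracing middleware on top of the shared layers.
    wrap_router(Router::new().route("/whoami", get(whoami)), true)
}
```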

src/axum/server.rs Normal file

@@ -0,0 +1,119 @@
use std::{net, os::unix};
use axum::Router;
use axum_server::{
Handle,
accept::DefaultAcceptor,
tls_rustls::{RustlsAcceptor, RustlsConfig},
};
use eyre::Result;
use tracing::info;
use crate::{
arbiter::{Arbiter, Tasks},
axum::accept::{proxy_protocol::ProxyProtocolAcceptor, tls::TlsAcceptor},
};
async fn run_plain(
arbiter: Arbiter,
name: &str,
router: Router,
addr: net::SocketAddr,
) -> Result<()> {
info!(addr = addr.to_string(), "starting {name} server");
let handle = Handle::new();
arbiter.add_net_handle(handle.clone()).await;
axum_server::Server::bind(addr)
.handle(handle)
.serve(router.into_make_service_with_connect_info::<net::SocketAddr>())
.await?;
Ok(())
}
pub(crate) fn start_plain(
tasks: &mut Tasks,
name: &'static str,
router: Router,
addr: net::SocketAddr,
) -> Result<()> {
let arbiter = tasks.arbiter();
tasks
.build_task()
.name(&format!("{}::run_plain({name}, {addr})", module_path!()))
.spawn(run_plain(arbiter, name, router, addr))?;
Ok(())
}
pub(crate) async fn run_unix(
arbiter: Arbiter,
name: &str,
router: Router,
addr: unix::net::SocketAddr,
) -> Result<()> {
info!(addr = ?addr, "starting {name} server");
let handle = Handle::new();
arbiter.add_unix_handle(handle.clone()).await;
axum_server::Server::bind(addr)
.handle(handle)
.serve(router.into_make_service())
.await?;
Ok(())
}
pub(crate) fn start_unix(
tasks: &mut Tasks,
name: &'static str,
router: Router,
addr: unix::net::SocketAddr,
) -> Result<()> {
let arbiter = tasks.arbiter();
tasks
.build_task()
.name(&format!("{}::run_unix({name}, {addr:?})", module_path!()))
.spawn(run_unix(arbiter, name, router, addr))?;
Ok(())
}
async fn run_tls(
arbiter: Arbiter,
name: &str,
router: Router,
addr: net::SocketAddr,
config: RustlsConfig,
) -> Result<()> {
info!(addr = addr.to_string(), "starting {name} server");
let handle = Handle::new();
arbiter.add_net_handle(handle.clone()).await;
axum_server::Server::bind(addr)
.acceptor(TlsAcceptor::new(RustlsAcceptor::new(config).acceptor(
ProxyProtocolAcceptor::new().acceptor(DefaultAcceptor::new()),
)))
.handle(handle)
.serve(router.into_make_service_with_connect_info::<net::SocketAddr>())
.await?;
Ok(())
}
pub(crate) fn start_tls(
tasks: &mut Tasks,
name: &'static str,
router: Router,
addr: net::SocketAddr,
config: RustlsConfig,
) -> Result<()> {
let arbiter = tasks.arbiter();
tasks
.build_task()
.name(&format!("{}::run_tls({name}, {addr})", module_path!()))
.spawn(run_tls(arbiter, name, router, addr, config))?;
Ok(())
}
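And a sketch of how these helpers might be combined at startup: bind a plain HTTP listener, hand it a router, and let `Tasks::run` drive everything until a signal triggers shutdown. The listen address and the surrounding function are illustrative only; the real address would come from the configuration.

```rust
use std::net::SocketAddr;

use eyre::Result;

use crate::{arbiter::Tasks, axum::server::start_plain};

// Hypothetical startup helper combining Tasks with the plain-HTTP server starter.
async fn serve(router: axum::Router) -> Result<()> {
    let mut tasks = Tasks::new()?;
    let addr: SocketAddr = "[::]:9000".parse()?;
    start_plain(&mut tasks, "core", router, addr)?;
    for err in tasks.run().await {
        tracing::error!("server task failed: {err:?}");
    }
    Ok(())
}
```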

src/axum/trace.rs Normal file

@@ -0,0 +1,48 @@
use std::collections::HashMap;
use axum::{extract::Request, middleware::Next, response::Response};
use tokio::time::Instant;
use tracing::{Instrument as _, field, info, info_span, trace};
use crate::config;
pub(crate) async fn span_middleware(request: Request, next: Next) -> Response {
let config = config::get();
let http_headers = request
.headers()
.iter()
.filter(|(name, _)| {
for header in &config.log.http_headers {
if header.eq_ignore_ascii_case(name.as_str()) {
return true;
}
}
false
})
.map(|(name, value)| (name.to_string().to_lowercase().replace('-', "_"), value))
.collect::<HashMap<_, _>>();
let span = info_span!(
"request",
path = %request.uri(),
method = %request.method(),
remote = field::Empty,
scheme = field::Empty,
host = field::Empty,
http_headers = ?http_headers,
);
next.run(request).instrument(span).await
}
pub(crate) async fn tracing_middleware(request: Request, next: Next) -> Response {
let event = request.uri().clone();
trace!("request start");
let start = Instant::now();
let response = next.run(request).await;
let runtime = start.elapsed();
let status = response.status().as_u16();
info!(status = status, runtime = runtime.as_millis(), "{event}");
response
}

src/brands/mod.rs Normal file

@@ -0,0 +1 @@
pub(crate) mod tls;

src/brands/tls.rs Normal file

@@ -0,0 +1,134 @@
use std::{
collections::{HashMap, hash_map::Entry},
sync::Arc,
};
use eyre::{Report, Result};
use rustls::{
RootCertStore,
crypto::CryptoProvider,
pki_types::{CertificateDer, PrivateKeyDer, pem::PemObject as _},
server::ClientHello,
sign::CertifiedKey,
};
use crate::db;
#[derive(Debug)]
struct Brand {
domain: String,
default: bool,
web_certificate: Arc<CertifiedKey>,
}
#[derive(Debug)]
pub(crate) struct CertResolver {
brands: Vec<Brand>,
}
impl CertResolver {
pub(crate) fn resolve(&self, client_hello: &ClientHello<'_>) -> Option<Arc<CertifiedKey>> {
let server_name = client_hello.server_name()?;
let mut best = None;
for brand in &self.brands {
if best.is_none() && brand.default {
best = Some(Arc::clone(&brand.web_certificate));
}
if server_name == brand.domain || server_name.ends_with(&format!(".{}", brand.domain)) {
best = Some(Arc::clone(&brand.web_certificate));
}
}
best
}
}
pub(crate) async fn make_cert_managers() -> Result<(CertResolver, RootCertStore)> {
#[derive(sqlx::FromRow)]
struct BrandRow {
brand_uuid: uuid::Uuid,
domain: String,
default: bool,
web_cert_data: Option<String>,
web_cert_key: Option<String>,
client_cert_data: Option<String>,
}
let rows = sqlx::query_as::<_, BrandRow>(
"
SELECT
b.brand_uuid,
b.domain,
b.default,
wc.certificate_data AS web_cert_data,
wc.key_data AS web_cert_key,
cc.certificate_data AS client_cert_data
FROM authentik_brands_brand b
LEFT JOIN authentik_crypto_certificatekeypair wc
ON wc.kp_uuid = b.web_certificate_id
LEFT JOIN authentik_brands_brand_client_certificates bcc
ON bcc.brand_id = b.brand_uuid
LEFT JOIN authentik_crypto_certificatekeypair cc
ON cc.kp_uuid = bcc.certificatekeypair_id
",
)
.fetch_all(db::get())
.await?;
let (brands, roots) = tokio::task::spawn_blocking(|| {
let mut brands = HashMap::new();
let mut roots = RootCertStore::empty();
for row in rows {
let BrandRow {
brand_uuid,
domain,
default,
web_cert_data,
web_cert_key,
client_cert_data,
} = row;
if let (Some(certificate_data), Some(key_data)) = (web_cert_data, web_cert_key)
&& let Entry::Vacant(e) = brands.entry(brand_uuid)
{
let brand = Brand {
domain,
default,
web_certificate: {
let cert_chain =
CertificateDer::pem_reader_iter(certificate_data.as_bytes())
.collect::<Result<Vec<_>, _>>()?;
let key_der = PrivateKeyDer::from_pem_reader(key_data.as_bytes())?;
let provider =
CryptoProvider::get_default().expect("no rustls provider installed");
Arc::new(CertifiedKey::new(
cert_chain,
provider.key_provider.load_private_key(key_der)?,
))
},
};
e.insert(brand);
}
if let Some(certificate_data) = client_cert_data {
let cert_chain = CertificateDer::pem_reader_iter(certificate_data.as_bytes())
.collect::<Result<Vec<_>, _>>()?;
for cert in cert_chain {
roots.add(cert)?;
}
}
}
Ok::<_, Report>((brands, roots))
})
.await??;
Ok((
CertResolver {
brands: brands.into_values().collect(),
},
roots,
))
}
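
As a side note on the matching rule in `CertResolver::resolve`: an SNI value is accepted for a brand when it equals the brand's domain or is a subdomain of it. A standalone sketch of that predicate (the function and test names here are illustrative, not part of the diff):

// Mirrors the SNI check in CertResolver::resolve: exact match or any subdomain.
fn sni_matches(server_name: &str, brand_domain: &str) -> bool {
    server_name == brand_domain || server_name.ends_with(&format!(".{brand_domain}"))
}

#[cfg(test)]
mod tests {
    use super::sni_matches;

    #[test]
    fn subdomains_match_but_lookalikes_do_not() {
        assert!(sni_matches("example.com", "example.com"));
        assert!(sni_matches("sso.example.com", "example.com"));
        assert!(!sni_matches("notexample.com", "example.com"));
    }
}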

244
src/config/mod.rs Normal file
View File

@@ -0,0 +1,244 @@
use std::{
env,
fs::{self, read_to_string},
path::PathBuf,
sync::{Arc, OnceLock},
};
use arc_swap::{ArcSwap, Guard};
use eyre::Result;
use notify::{RecommendedWatcher, Watcher as _};
use serde_json::{Map, Value};
use tokio::sync::mpsc;
use tracing::{error, info, warn};
pub(crate) mod schema;
pub(crate) use schema::Config;
use url::Url;
use crate::arbiter::{Arbiter, Tasks};
static DEFAULT_CONFIG: &str = include_str!("../../authentik/lib/default.yml");
static CONFIG_MANAGER: OnceLock<ConfigManager> = OnceLock::new();
fn config_paths() -> Vec<PathBuf> {
let mut config_paths = vec![
PathBuf::from("/etc/authentik/config.yml"),
PathBuf::from(""),
];
if let Ok(workspace) = env::var("WORKSPACE_DIR") {
let _ = env::set_current_dir(workspace);
}
if let Ok(paths) = glob::glob("/etc/authentik/config.d/*.yml") {
config_paths.extend(paths.filter_map(Result::ok));
}
let environment = env::var("AUTHENTIK_ENV").unwrap_or_else(|_| "local".to_owned());
let mut computed_paths = Vec::new();
for path in config_paths {
if let Ok(metadata) = fs::metadata(&path) {
if !metadata.is_dir() {
computed_paths.push(path);
}
} else {
let env_paths = vec![
path.join(format!("{environment}.yml")),
path.join(format!("{environment}.env.yml")),
];
for env_path in env_paths {
if let Ok(metadata) = fs::metadata(&env_path)
&& !metadata.is_dir()
{
computed_paths.push(env_path);
}
}
}
}
computed_paths
}
impl Config {
fn load_raw(config_paths: &[PathBuf]) -> Result<Value> {
let mut builder = config::Config::builder().add_source(config::File::from_str(
DEFAULT_CONFIG,
config::FileFormat::Yaml,
));
for path in config_paths {
builder = builder
.add_source(config::File::from(path.as_path()).format(config::FileFormat::Yaml));
}
builder = builder.add_source(config::Environment::with_prefix("AUTHENTIK"));
let config = builder.build()?;
let raw = config.try_deserialize::<Value>()?;
Ok(raw)
}
fn expand_value(value: &str) -> (String, Option<PathBuf>) {
let value = value.trim();
if let Ok(uri) = Url::parse(value) {
let fallback = uri.query().unwrap_or("").to_owned();
match uri.scheme() {
"file" => {
let path = uri.path();
match read_to_string(path).map(|s| s.trim().to_owned()) {
Ok(value) => return (value, Some(PathBuf::from(path))),
Err(err) => {
error!("failed to read config value from {path}: {err}");
return (fallback, Some(PathBuf::from(path)));
}
}
}
"env" => {
if let Some(var) = uri.host_str() {
if let Ok(value) = env::var(var) {
return (value, None);
}
return (fallback, None);
}
}
_ => {}
}
}
(value.to_owned(), None)
}
fn expand(mut raw: Value) -> (Value, Vec<PathBuf>) {
let mut file_paths = Vec::new();
let value = match &mut raw {
Value::String(s) => {
let (v, path) = Self::expand_value(s);
if let Some(path) = path {
file_paths.push(path);
}
Value::String(v)
}
Value::Array(arr) => {
let mut res = Vec::with_capacity(arr.len());
for v in arr {
let (expanded, paths) = Self::expand(v.clone());
file_paths.extend(paths);
res.push(expanded);
}
Value::Array(res)
}
Value::Object(map) => {
let mut res = Map::with_capacity(map.len());
for (k, v) in map {
let (expanded, paths) = Self::expand(v.clone());
file_paths.extend(paths);
res.insert(k.clone(), expanded);
}
Value::Object(res)
}
_ => raw,
};
(value, file_paths)
}
fn load(config_paths: &[PathBuf]) -> Result<(Self, Vec<PathBuf>)> {
let raw = Self::load_raw(config_paths)?;
let (expanded, file_paths) = Self::expand(raw);
let config: Self = serde_json::from_value(expanded)?;
Ok((config, file_paths))
}
}
pub(crate) struct ConfigManager {
config: ArcSwap<Config>,
config_paths: Vec<PathBuf>,
watch_paths: Vec<PathBuf>,
}
impl ConfigManager {
pub(crate) fn init() -> Result<()> {
info!("loading config");
let config_paths = config_paths();
let mut watch_paths = config_paths.clone();
let (config, other_paths) = Config::load(&config_paths)?;
watch_paths.extend(other_paths);
let manager = Self {
config: ArcSwap::from_pointee(config),
config_paths,
watch_paths,
};
CONFIG_MANAGER.get_or_init(|| manager);
info!("config loaded");
Ok(())
}
pub(crate) fn run(tasks: &mut Tasks) -> Result<()> {
info!("starting config file watcher");
let arbiter = tasks.arbiter();
tasks
.build_task()
.name(&format!("{}::watch_config", module_path!()))
.spawn(watch_config(arbiter))?;
Ok(())
}
}
async fn watch_config(arbiter: Arbiter) -> Result<()> {
let (tx, mut rx) = mpsc::channel(100);
let mut watcher = RecommendedWatcher::new(
move |res: notify::Result<notify::Event>| {
if let Ok(event) = res
&& let notify::EventKind::Modify(_) = &event.kind
{
let _ = tx.blocking_send(());
}
},
notify::Config::default(),
)?;
let watch_paths = &CONFIG_MANAGER
.get()
.expect("failed to get config, has it been initialized?")
.watch_paths;
for path in watch_paths {
watcher.watch(path.as_ref(), notify::RecursiveMode::NonRecursive)?;
}
info!("config file watcher started on paths: {:?}", watch_paths);
loop {
tokio::select! {
res = rx.recv() => {
info!("a configuration file changed, reloading config");
if res.is_none() {
break;
}
let manager = CONFIG_MANAGER.get().expect("failed to get config, has it been initialized?");
match tokio::task::spawn_blocking(|| Config::load(&manager.config_paths)).await? {
Ok((new_config, _)) => {
info!("configuration reloaded");
manager.config.store(Arc::new(new_config));
if let Err(err) = arbiter.config_changed_send(()) {
warn!("failed to notify of config change, aborting: {err:?}");
break;
}
}
Err(err) => {
warn!("failed to reload config, continuing with previous config: {err:?}");
}
}
},
() = arbiter.shutdown() => break,
}
}
info!("stopping config file watcher");
Ok(())
}
pub(crate) fn get() -> Guard<Arc<Config>> {
let manager = CONFIG_MANAGER
.get()
.expect("failed to get config, has it been initialized?");
manager.config.load()
}
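
For reference, the intended read pattern for this hot-reloaded config is a short-lived guard; a minimal sketch of a hypothetical caller inside the crate (the helper name is illustrative):

use std::time::Duration;

use crate::config;

// Hypothetical helper: config::get() returns an arc_swap Guard, i.e. a cheap,
// lock-free snapshot of the current Config. Hold it briefly; a reload simply
// swaps in a new Arc underneath for the next caller.
fn http_timeout() -> Duration {
    let config = config::get();
    Duration::from_secs(u64::from(config.http_timeout))
}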

146
src/config/schema.rs Normal file
View File

@@ -0,0 +1,146 @@
use std::{collections::HashMap, net::SocketAddr, num::NonZeroUsize, path::PathBuf};
use ipnet::IpNet;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct Config {
pub(crate) postgresql: PostgreSQLConfig,
pub(crate) listen: ListenConfig,
pub(crate) http_timeout: u32,
pub(crate) debug: bool,
#[serde(default)]
pub(crate) secret_key: String,
pub(crate) log_level: String,
pub(crate) log: LogConfig,
pub(crate) error_reporting: ErrorReportingConfig,
pub(crate) outposts: OutpostsConfig,
pub(crate) cookie_domain: Option<String>,
pub(crate) compliance: ComplianceConfig,
pub(crate) blueprints_dir: PathBuf,
pub(crate) cert_discovery_dir: PathBuf,
pub(crate) web: WebConfig,
pub(crate) worker: WorkerConfig,
pub(crate) storage: StorageConfig,
// Outpost specific config
// These are only relevant for outposts, and cannot be set via YAML
// They are loaded via this config loader to support file:// schemas
pub(crate) authentik_host: Option<String>,
pub(crate) authentik_host_browser: Option<String>,
pub(crate) authentik_token: Option<String>,
pub(crate) authentik_insecure: Option<bool>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct PostgreSQLConfig {
pub(crate) host: String,
pub(crate) port: u16,
pub(crate) user: String,
pub(crate) password: String,
pub(crate) name: String,
pub(crate) sslmode: String,
pub(crate) sslrootcert: Option<String>,
pub(crate) sslcert: Option<String>,
pub(crate) sslkey: Option<String>,
pub(crate) conn_max_age: Option<u64>,
pub(crate) conn_health_checks: bool,
pub(crate) default_schema: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ListenConfig {
pub(crate) http: Vec<SocketAddr>,
pub(crate) https: Vec<SocketAddr>,
pub(crate) ldap: Vec<SocketAddr>,
pub(crate) ldaps: Vec<SocketAddr>,
pub(crate) radius: Vec<SocketAddr>,
pub(crate) metrics: Vec<SocketAddr>,
pub(crate) debug: SocketAddr,
pub(crate) trusted_proxy_cidrs: Vec<IpNet>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct LogConfig {
pub(crate) http_headers: Vec<String>,
pub(crate) rust_log: HashMap<String, String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ErrorReportingConfig {
pub(crate) enabled: bool,
pub(crate) sentry_dsn: Option<String>,
pub(crate) environment: String,
pub(crate) send_pii: bool,
pub(crate) sample_rate: f32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct OutpostsConfig {
pub(crate) disable_embedded_outpost: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ComplianceConfig {
pub(crate) fips: ComplianceFipsConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ComplianceFipsConfig {
pub(crate) enabled: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WebConfig {
pub(crate) workers: usize,
pub(crate) threads: usize,
pub(crate) path: String,
pub(crate) timeout_http_read_header: String,
pub(crate) timeout_http_read: String,
pub(crate) timeout_http_write: String,
pub(crate) timeout_http_idle: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WorkerConfig {
pub(crate) processes: NonZeroUsize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct StorageConfig {
pub(crate) backend: String,
pub(crate) file: StorageFileConfig,
pub(crate) media: Option<StorageOverrideConfig>,
pub(crate) reports: Option<StorageOverrideConfig>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct StorageFileConfig {
pub(crate) path: PathBuf,
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub(crate) struct StorageOverrideConfig {
pub(crate) backend: Option<String>,
pub(crate) file: Option<StorageFileOverrideConfig>,
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub(crate) struct StorageFileOverrideConfig {
pub(crate) path: Option<PathBuf>,
}

110
src/db.rs Normal file
View File

@@ -0,0 +1,110 @@
use std::{str::FromStr as _, sync::OnceLock, time::Duration};
use eyre::Result;
use sqlx::{
Executor as _, PgPool,
postgres::{PgConnectOptions, PgPoolOptions, PgSslMode},
};
use tracing::{info, log::LevelFilter, trace};
use crate::{
arbiter::{Arbiter, Tasks},
authentik_full_version, config,
mode::Mode,
};
static DB: OnceLock<PgPool> = OnceLock::new();
fn get_connect_opts() -> Result<PgConnectOptions> {
let config = config::get();
let mut opts = PgConnectOptions::new()
.application_name(&format!(
"authentik-{}@{}",
Mode::get(),
authentik_full_version()
))
.host(&config.postgresql.host)
.port(config.postgresql.port)
.username(&config.postgresql.user)
.password(&config.postgresql.password)
.database(&config.postgresql.name)
.ssl_mode(PgSslMode::from_str(&config.postgresql.sslmode)?);
if let Some(sslrootcert) = &config.postgresql.sslrootcert {
opts = opts.ssl_root_cert_from_pem(sslrootcert.as_bytes().to_vec());
}
if let Some(sslcert) = &config.postgresql.sslcert {
opts = opts.ssl_client_cert_from_pem(sslcert.as_bytes());
}
if let Some(sslkey) = &config.postgresql.sslkey {
opts = opts.ssl_client_key_from_pem(sslkey.as_bytes());
}
Ok(opts)
}
async fn update_connect_opts_on_config_change(arbiter: Arbiter) -> Result<()> {
let mut config_changed_rx = arbiter.config_changed_subscribe();
info!("starting database watcher for config changes");
loop {
tokio::select! {
res = config_changed_rx.changed() => {
if let Err(err) = res {
trace!("error receiving config changes: {err:?}");
break;
}
trace!("config change received, refreshing database connection options");
let db = get();
db.set_connect_options(get_connect_opts()?);
},
() = arbiter.shutdown() => break,
}
}
info!("stopping database watcher for config changes");
Ok(())
}
pub(crate) async fn init(tasks: &mut Tasks) -> Result<()> {
info!("initializing database pool");
let options = get_connect_opts()?;
let config = config::get();
let pool_options = PgPoolOptions::new()
.min_connections(1)
.max_connections(4)
.acquire_time_level(LevelFilter::Trace)
.max_lifetime(config.postgresql.conn_max_age.map(Duration::from_secs))
.test_before_acquire(config.postgresql.conn_health_checks)
.after_connect(|conn, _meta| {
Box::pin(async move {
let application_name =
format!("authentik-{}@{}", Mode::get(), authentik_full_version());
let default_schema = &config::get().postgresql.default_schema;
let query = format!(
"SET application_name = '{application_name}'; SET search_path = \
'{default_schema}';"
);
conn.execute(query.as_str()).await?;
Ok(())
})
});
let pool = pool_options.connect_with(options).await?;
DB.get_or_init(|| pool);
let arbiter = tasks.arbiter();
tasks
.build_task()
.name(&format!(
"{}::update_connect_opts_on_config_change",
module_path!(),
))
.spawn(update_connect_opts_on_config_change(arbiter))?;
info!("database pool initialized");
Ok(())
}
pub(crate) fn get() -> &'static PgPool {
DB.get()
.expect("failed to get db, has it been initialized?")
}
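
A minimal sketch of how other modules would use the shared pool (the helper name and query are illustrative; the table is the one queried in src/brands/tls.rs above):

use eyre::Result;

use crate::db;

// Hypothetical helper: db::get() panics if init() has not run, so it is only
// called after startup has initialized the pool.
async fn brand_count() -> Result<i64> {
    let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM authentik_brands_brand")
        .fetch_one(db::get())
        .await?;
    Ok(count)
}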

220
src/main.rs Normal file
View File

@@ -0,0 +1,220 @@
use std::{
process::exit,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
},
};
use ::tracing::{error, info, trace};
use argh::FromArgs;
use eyre::{Result, eyre};
use crate::{arbiter::Tasks, config::ConfigManager, mode::Mode};
mod arbiter;
mod axum;
#[cfg(feature = "core")]
mod brands;
mod config;
#[cfg(feature = "core")]
mod db;
mod metrics;
mod mode;
#[cfg(feature = "proxy")]
mod proxy;
#[cfg(feature = "core")]
mod server;
mod tokio;
mod tracing;
#[cfg(feature = "core")]
mod worker;
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub(crate) fn authentik_build_hash(fallback: Option<String>) -> String {
std::env::var("GIT_BUILD_HASH").unwrap_or_else(|_| fallback.unwrap_or_default())
}
pub(crate) fn authentik_full_version() -> String {
let build_hash = authentik_build_hash(None);
if build_hash.is_empty() {
VERSION.to_owned()
} else {
format!("{VERSION}+{build_hash}")
}
}
pub(crate) fn authentik_user_agent() -> String {
format!("authentik@{}", authentik_full_version())
}
#[derive(Debug, FromArgs, PartialEq)]
/// The authentication glue you need.
struct Cli {
#[argh(subcommand)]
command: Command,
}
#[derive(Debug, FromArgs, PartialEq)]
#[argh(subcommand)]
enum Command {
#[cfg(feature = "core")]
AllInOne(AllInOne),
#[cfg(feature = "core")]
Server(server::Cli),
#[cfg(feature = "core")]
Worker(worker::Cli),
#[cfg(feature = "proxy")]
Proxy(proxy::Cli),
#[cfg(feature = "core")]
Manage(Manage),
}
#[derive(Debug, FromArgs, PartialEq)]
/// Run the authentik server and worker.
#[argh(subcommand, name = "allinone")]
#[expect(
clippy::empty_structs_with_brackets,
reason = "argh doesn't support unit structs"
)]
struct AllInOne {}
#[derive(Debug, FromArgs, PartialEq)]
/// authentik django's management command.
#[argh(subcommand, name = "manage")]
struct Manage {
#[argh(positional, greedy)]
args: Vec<String>,
}
fn main() -> Result<()> {
let tracing_crude = tracing::install_crude();
info!(version = env!("CARGO_PKG_VERSION"), "authentik is starting");
let cli: Cli = argh::from_env();
match &cli.command {
#[cfg(feature = "core")]
Command::AllInOne(_) => Mode::set(Mode::AllInOne)?,
#[cfg(feature = "core")]
Command::Server(_) => Mode::set(Mode::Server)?,
#[cfg(feature = "core")]
Command::Worker(_) => Mode::set(Mode::Worker)?,
#[cfg(feature = "proxy")]
Command::Proxy(_) => Mode::set(Mode::Proxy)?,
#[cfg(feature = "core")]
Command::Manage(args) => {
let mut process = std::process::Command::new("python")
.args(["-m", "manage"])
.args(&args.args)
.spawn()?;
let status = process.wait()?;
if let Some(code) = status.code() {
exit(code);
}
return Ok(());
}
}
trace!("installing error formatting");
color_eyre::install()?;
trace!("installing rustls crypto provider");
#[expect(
clippy::unwrap_in_result,
reason = "result type does not implement Error"
)]
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.expect("Failed to install rustls provider");
#[cfg(feature = "core")]
if Mode::is_core() {
if std::env::var("PROMETHEUS_MULTIPROC_DIR").is_err() {
let dir = std::env::temp_dir().join("authentik_prometheus_tmp");
std::fs::create_dir_all(&dir)?;
#[expect(unsafe_code, reason = "see safety comment below")]
// SAFETY: there is only one thread at this point, so this is safe.
unsafe {
std::env::set_var("PROMETHEUS_MULTIPROC_DIR", dir);
}
trace!(
env = std::env::var("PROMETHEUS_MULTIPROC_DIR").unwrap_or_default(),
"setting PROMETHEUS_MULTIPROC_DIR"
);
} else {
trace!("PROMETHEUS_MULTIPROC_DIR already set");
}
trace!("initializing Python");
pyo3::Python::initialize();
trace!("Python initialized");
}
ConfigManager::init()?;
let _sentry = config::get()
.error_reporting
.enabled
.then(tracing::sentry::install);
tracing::install()?;
drop(tracing_crude);
::tokio::runtime::Builder::new_multi_thread()
.thread_name_fn(|| {
static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
format!("tokio-{id}")
})
.enable_all()
.build()?
.block_on(async {
let mut tasks = Tasks::new()?;
ConfigManager::run(&mut tasks)?;
let metrics = metrics::run(&mut tasks)?;
#[cfg(feature = "core")]
if Mode::is_core() {
db::init(&mut tasks).await?;
}
match cli.command {
#[cfg(feature = "core")]
Command::AllInOne(_) => {
let workers = worker::run(worker::Cli::default(), &mut tasks)?;
metrics.workers.store(Some(Arc::clone(&workers)));
let server = server::run(server::Cli::default(), &mut tasks)?;
server.workers.store(Some(workers));
metrics.server.store(Some(server));
}
#[cfg(feature = "core")]
Command::Server(args) => {
let server = server::run(args, &mut tasks)?;
metrics.server.store(Some(server));
}
#[cfg(feature = "core")]
Command::Worker(args) => {
let workers = worker::run(args, &mut tasks)?;
metrics.workers.store(Some(workers));
}
#[cfg(feature = "proxy")]
Command::Proxy(args) => proxy::run(args, &mut tasks)?,
#[cfg(feature = "core")]
Command::Manage(_) => unreachable!(),
}
let errors = tasks.run().await;
if errors.is_empty() {
info!("authentik exiting");
Ok(())
} else {
error!("authentik encountered errors: {:?}", errors);
Err(eyre!("Errors encountered: {:?}", errors))
}
})
}

91
src/metrics/handlers.rs Normal file
View File

@@ -0,0 +1,91 @@
use std::sync::Arc;
use axum::{body::Body, extract::State, http::StatusCode, response::Response};
#[cfg(feature = "core")]
use crate::mode::Mode;
use crate::{axum::error::Result, metrics::Metrics};
pub(super) async fn metrics_handler(State(state): State<Arc<Metrics>>) -> Result<Response> {
let mut metrics = Vec::new();
state.prometheus.render_to_write(&mut metrics)?;
#[cfg(feature = "core")]
if Mode::is_core() {
use axum::http::{Request, header::HOST};
if [Mode::AllInOne, Mode::Server].contains(&Mode::get()) {
let req = Request::builder()
.method("GET")
.uri("http://localhost:8000/-/metrics/")
.header(HOST, "localhost")
.body(Body::from(""));
if let Ok(req) = req
&& let Some(server) = state.server.load_full()
{
let _ = server.client.request(req).await;
}
} else if [Mode::Worker].contains(&Mode::get()) {
let req = Request::builder()
.method("GET")
.uri("http://localhost:8000/-/metrics/")
.header(HOST, "localhost")
.body(Body::from(""));
if let Ok(req) = req
&& let Some(workers) = state.workers.load_full()
{
let _ = workers.client.request(req).await;
}
}
metrics.extend(tokio::task::spawn_blocking(python::get_python_metrics).await??);
}
Ok(Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "text/plain; version=1.0.0; charset=utf-8")
.body(Body::from(metrics))?)
}
#[cfg(feature = "core")]
mod python {
use eyre::{Report, Result};
use pyo3::{
IntoPyObjectExt as _,
ffi::c_str,
prelude::*,
types::{PyBytes, PyDict},
};
pub(super) fn get_python_metrics() -> Result<Vec<u8>> {
let metrics = Python::attach(|py| {
let locals = PyDict::new(py);
Python::run(
py,
c_str!(
r#"
from prometheus_client import (
CollectorRegistry,
generate_latest,
multiprocess,
)
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
output = generate_latest(registry)
"#
),
None,
Some(&locals),
)?;
let metrics = locals
.get_item("output")?
.unwrap_or(PyBytes::new(py, &[]).into_bound_py_any(py)?)
.cast::<PyBytes>()
.map_or_else(|_| PyBytes::new(py, &[]), |v| v.to_owned())
.as_bytes()
.to_owned();
Ok::<_, Report>(metrics)
})?;
Ok::<_, Report>(metrics)
}
}

84
src/metrics/mod.rs Normal file
View File

@@ -0,0 +1,84 @@
use std::{env::temp_dir, os::unix, sync::Arc, time::Duration};
use arc_swap::ArcSwapOption;
use axum::{Router, routing::any};
use eyre::Result;
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
use crate::{
arbiter::{Arbiter, Tasks},
axum::{router::wrap_router, server},
config,
};
#[cfg(feature = "core")]
use crate::{server::Server, worker::Workers};
mod handlers;
pub(crate) struct Metrics {
prometheus: PrometheusHandle,
#[cfg(feature = "core")]
pub(crate) server: ArcSwapOption<Server>,
#[cfg(feature = "core")]
pub(crate) workers: ArcSwapOption<Workers>,
}
impl Metrics {
fn new() -> Result<Self> {
let prometheus = PrometheusBuilder::new()
.with_recommended_naming(true)
.install_recorder()?;
Ok(Self {
prometheus,
#[cfg(feature = "core")]
server: ArcSwapOption::empty(),
#[cfg(feature = "core")]
workers: ArcSwapOption::empty(),
})
}
}
async fn run_upkeep(arbiter: Arbiter, state: Arc<Metrics>) -> Result<()> {
loop {
tokio::select! {
() = tokio::time::sleep(Duration::from_secs(5)) => {
let state_clone = Arc::clone(&state);
tokio::task::spawn_blocking(move || state_clone.prometheus.run_upkeep()).await?;
},
() = arbiter.shutdown() => return Ok(())
}
}
}
fn build_router(state: Arc<Metrics>) -> Router {
wrap_router(
Router::new()
.fallback(any(handlers::metrics_handler))
.with_state(state),
true,
)
}
pub(super) fn run(tasks: &mut Tasks) -> Result<Arc<Metrics>> {
let arbiter = tasks.arbiter();
let metrics = Arc::new(Metrics::new()?);
let router = build_router(Arc::clone(&metrics));
tasks
.build_task()
.name(&format!("{}::run_upkeep", module_path!(),))
.spawn(run_upkeep(arbiter, Arc::clone(&metrics)))?;
for addr in config::get().listen.metrics.iter().copied() {
server::start_plain(tasks, "metrics", router.clone(), addr)?;
}
server::start_unix(
tasks,
"metrics",
router,
unix::net::SocketAddr::from_pathname(temp_dir().join("authentik-metrics.sock"))?,
)?;
Ok(metrics)
}
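
Since the metrics router is also bound to a unix socket in the temp dir, a local scrape can reuse the same unix-socket client pattern that src/server/mod.rs uses later in this diff; a hedged sketch (the helper name and request URI are illustrative):

use std::env::temp_dir;

use axum::{body::Body, http::Request};
use eyre::Result;
use hyper_unix_socket::UnixSocketConnector;
use hyper_util::{client::legacy::Client, rt::TokioExecutor};

// Hypothetical scrape helper: connects to the authentik-metrics.sock bound above
// and returns the HTTP status of a metrics request. The path is arbitrary since
// the metrics router answers every route through its fallback handler.
async fn scrape_local_metrics() -> Result<u16> {
    let connector = UnixSocketConnector::new(temp_dir().join("authentik-metrics.sock"));
    let client: Client<_, Body> = Client::builder(TokioExecutor::new()).build(connector);
    let request = Request::get("http://localhost/metrics").body(Body::from(""))?;
    let response = client.request(request).await?;
    Ok(response.status().as_u16())
}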

78
src/mode.rs Normal file
View File

@@ -0,0 +1,78 @@
use std::{
env,
path::PathBuf,
sync::atomic::{AtomicU8, Ordering},
};
use eyre::Result;
static MODE: AtomicU8 = AtomicU8::new(0);
fn mode_path() -> PathBuf {
env::temp_dir().join("authentik-mode")
}
#[derive(PartialEq)]
#[repr(u8)]
pub(crate) enum Mode {
#[cfg(feature = "core")]
AllInOne = 0,
#[cfg(feature = "core")]
Server = 1,
#[cfg(feature = "core")]
Worker = 2,
#[cfg(feature = "proxy")]
Proxy = 3,
}
impl std::fmt::Display for Mode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
#[cfg(feature = "core")]
Self::AllInOne => write!(f, "allinone"),
#[cfg(feature = "core")]
Self::Server => write!(f, "server"),
#[cfg(feature = "core")]
Self::Worker => write!(f, "worker"),
#[cfg(feature = "proxy")]
Self::Proxy => write!(f, "proxy"),
}
}
}
impl From<Mode> for u8 {
#[expect(clippy::as_conversions, reason = "repr of enum is u8")]
fn from(value: Mode) -> Self {
value as Self
}
}
impl Mode {
pub(crate) fn get() -> Self {
match MODE.load(Ordering::Relaxed) {
#[cfg(feature = "core")]
0 => Self::AllInOne,
#[cfg(feature = "core")]
1 => Self::Server,
#[cfg(feature = "core")]
2 => Self::Worker,
#[cfg(feature = "proxy")]
3 => Self::Proxy,
_ => unreachable!(),
}
}
pub(crate) fn set(mode: Self) -> Result<()> {
std::fs::write(mode_path(), mode.to_string())?;
MODE.store(mode.into(), Ordering::SeqCst);
Ok(())
}
pub(crate) fn is_core() -> bool {
match Self::get() {
#[cfg(feature = "core")]
Self::AllInOne | Self::Server | Self::Worker => true,
_ => false,
}
}
}

48
src/proxy/mod.rs Normal file
View File

@@ -0,0 +1,48 @@
use argh::FromArgs;
use axum::extract::Request;
use eyre::Result;
use crate::arbiter::{Arbiter, Tasks};
#[derive(Debug, FromArgs, PartialEq)]
/// Run the authentik proxy outpost.
#[argh(subcommand, name = "proxy")]
#[expect(
clippy::empty_structs_with_brackets,
reason = "argh doesn't support unit structs"
)]
pub(crate) struct Cli {}
pub(crate) mod tls {
use std::sync::Arc;
use rustls::{server::ClientHello, sign::CertifiedKey};
#[derive(Debug)]
pub(crate) struct CertResolver;
impl CertResolver {
#[expect(clippy::unused_self, reason = "still WIP")]
pub(crate) fn resolve(&self, _client_hello: &ClientHello<'_>) -> Option<Arc<CertifiedKey>> {
None
}
}
}
pub(crate) fn can_handle(_request: &Request) -> bool {
false
}
pub(crate) async fn ignore_me(arbiter: Arbiter) -> Result<()> {
arbiter.shutdown().await;
Ok(())
}
pub(super) fn run(_cli: Cli, tasks: &mut Tasks) -> Result<()> {
let arbiter = tasks.arbiter();
tasks
.build_task()
.name(&format!("{}::ignore_me", module_path!(),))
.spawn(ignore_me(arbiter))?;
Ok(())
}

448
src/server/core.rs Normal file
View File

@@ -0,0 +1,448 @@
use std::sync::{Arc, LazyLock, atomic::Ordering};
use axum::{
Extension, Router,
body::Body,
extract::{Request, State},
http::{
HeaderName, HeaderValue, StatusCode, Uri,
header::{ACCEPT, CONTENT_TYPE, HOST, LOCATION, RETRY_AFTER},
},
middleware::{Next, from_fn},
response::{IntoResponse, Response},
routing::any,
};
use http_body_util::BodyExt as _;
use serde_json::json;
use crate::{
axum::{
accept::tls::TlsState,
error::Result,
extract::{client_ip::ClientIp, host::Host, scheme::Scheme, trusted_proxy::TrustedProxy},
router::wrap_router,
},
config, db,
server::{
GUNICORN_READY, Server,
core::websockets::{handle_websocket_upgrade, is_websocket_upgrade},
},
};
static STARTUP_RESPONSE_JSON: LazyLock<Response<String>> = LazyLock::new(|| {
Response::builder()
.status(StatusCode::SERVICE_UNAVAILABLE)
.header(RETRY_AFTER, "5")
.header(CONTENT_TYPE, "application/json")
.body(
json!({
"error": "authentik starting",
})
.to_string(),
)
.expect("infallible")
});
static STARTUP_RESPONSE_HTML: LazyLock<Response<String>> = LazyLock::new(|| {
Response::builder()
.status(StatusCode::SERVICE_UNAVAILABLE)
.header(CONTENT_TYPE, "text/html")
.body(include_str!("../../web/dist/standalone/loading/startup.html").to_owned())
.expect("infallible")
});
static STARTUP_RESPONSE_PLAIN: LazyLock<Response<String>> = LazyLock::new(|| {
Response::builder()
.status(StatusCode::SERVICE_UNAVAILABLE)
.header(CONTENT_TYPE, "text/plain")
.body("authentik starting".to_owned())
.expect("infallible")
});
const SERVER: HeaderName = HeaderName::from_static("server");
const X_FORWARDED_CLIENT_CERT: HeaderName = HeaderName::from_static("x-forwarded-client-cert");
const X_FORWARDED_FOR: HeaderName = HeaderName::from_static("x-forwarded-for");
const X_FORWARDED_PROTO: HeaderName = HeaderName::from_static("x-forwarded-proto");
const X_POWERED_BY: HeaderName = HeaderName::from_static("x-powered-by");
const FORWARD_ALWAYS_REMOVED_HEADERS: [HeaderName; 7] = [
HeaderName::from_static("forwarded"),
HeaderName::from_static("host"),
X_FORWARDED_FOR,
HeaderName::from_static("x-forwarded-host"),
X_FORWARDED_PROTO,
HeaderName::from_static("x-forwarded-scheme"),
HeaderName::from_static("x-real-ip"),
];
const FORWARD_REMOVED_HEADERS_IF_UNTRUSTED: [HeaderName; 3] = [
HeaderName::from_static("ssl-client-cert"), // nginx-ingress
HeaderName::from_static("x-forwarded-tls-client-cert"), // traefik
X_FORWARDED_CLIENT_CERT, // envoy
];
fn startup_response(accept_header: &str) -> Response {
let response = if accept_header.contains("application/json") {
STARTUP_RESPONSE_JSON.clone()
} else if accept_header.contains("text/html") {
STARTUP_RESPONSE_HTML.clone()
} else {
STARTUP_RESPONSE_PLAIN.clone()
};
let (parts, body) = response.into_parts();
Response::from_parts(parts, body.into())
}
async fn forward_request(
ClientIp(client_ip): ClientIp,
Host(host): Host,
Scheme(scheme): Scheme,
State(server): State<Arc<Server>>,
TrustedProxy(trusted_proxy): TrustedProxy,
tls_state: Option<Extension<TlsState>>,
mut request: Request,
) -> Result<Response> {
let accept_header = request
.headers()
.get(ACCEPT)
.map(|v| v.to_str().unwrap_or_default().to_owned())
.unwrap_or_default();
if !GUNICORN_READY.load(Ordering::Relaxed) {
return Ok(startup_response(&accept_header));
}
let uri = Uri::builder()
.scheme("http")
.authority("localhost:8000")
.path_and_query(
request
.uri()
.path_and_query()
.map(|x| x.as_str())
.unwrap_or_default(),
)
.build()?;
*request.uri_mut() = uri;
for header_name in FORWARD_ALWAYS_REMOVED_HEADERS {
request.headers_mut().remove(header_name);
}
if !trusted_proxy {
for header_name in FORWARD_REMOVED_HEADERS_IF_UNTRUSTED {
request.headers_mut().remove(header_name);
}
}
request.headers_mut().insert(
X_FORWARDED_FOR,
HeaderValue::from_str(&client_ip.to_string())?,
);
request
.headers_mut()
.insert(HOST, HeaderValue::from_str(&host)?);
request
.headers_mut()
.insert(X_FORWARDED_PROTO, HeaderValue::from_str(scheme.as_ref())?);
if is_websocket_upgrade(request.headers()) {
return handle_websocket_upgrade(request, server).await;
}
if let Some(tls_state) = tls_state
&& let Some(peer_certificates) = &tls_state.peer_certificates
{
let xfcc = peer_certificates
.iter()
.map(|cert| {
let pem_encoded = pem::encode(&pem::Pem::new("CERTIFICATE", cert.as_ref()));
let url_encoded: String =
url::form_urlencoded::byte_serialize(pem_encoded.as_bytes()).collect();
format!("Cert={url_encoded}")
})
.collect::<Vec<_>>()
.join(",");
request
.headers_mut()
.insert("X_FORWARDED_CLIENT_CERT", HeaderValue::from_str(&xfcc)?);
}
match server.client.request(request).await {
Ok(res) => {
let (parts, body) = res.into_parts();
Ok(Response::from_parts(
parts,
Body::from_stream(body.into_data_stream()),
))
}
Err(_) => Ok(startup_response(&accept_header)),
}
}
fn build_gunicorn_router(server: Arc<Server>) -> Router {
wrap_router(
Router::new().fallback(forward_request).with_state(server),
config::get().debug, // enable tracing only in debug mode
)
}
async fn powered_by_middleware(request: Request, next: Next) -> Response {
let mut response = next.run(request).await;
response.headers_mut().remove(SERVER);
response
.headers_mut()
.insert(X_POWERED_BY, HeaderValue::from_static("authentik"));
response
}
async fn health_ready(State(server): State<Arc<Server>>) -> impl IntoResponse {
#[expect(clippy::if_same_then_else, reason = "For easier reading")]
if !server.is_alive().await {
StatusCode::SERVICE_UNAVAILABLE
} else if sqlx::query("SELECT 1").execute(db::get()).await.is_err() {
StatusCode::SERVICE_UNAVAILABLE
} else if let Some(workers) = server.workers.load_full()
&& !workers.are_alive().await
{
StatusCode::SERVICE_UNAVAILABLE
} else {
let req = Request::builder()
.method("GET")
.uri("http://localhost:8000/-/health/ready/")
.header(HOST, "localhost")
.body(Body::from(""));
if let Ok(req) = req
&& let Ok(res) = server.client.request(req).await
{
res.status()
} else {
StatusCode::SERVICE_UNAVAILABLE
}
}
}
pub(super) fn build_router(server: Arc<Server>) -> Router {
let router = wrap_router(
Router::new()
.route("/-/metrics/", any((StatusCode::NOT_FOUND, "not found")))
.route("/-/health/ready/", any(health_ready))
.with_state(Arc::clone(&server))
.merge(super::r#static::build_router()),
true,
)
.merge(build_gunicorn_router(server))
.layer(from_fn(powered_by_middleware));
let config = config::get();
let path = &config.web.path;
if path == "/" {
router
} else {
Router::new()
.route(
"/",
any(
async || match HeaderValue::try_from(&config::get().web.path) {
Ok(location) => (StatusCode::FOUND, [(LOCATION, location)]).into_response(),
Err(err) => {
(StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response()
}
},
),
)
.nest(path, router)
}
}
mod websockets {
use std::sync::Arc;
use axum::{
body::Body,
extract::Request,
http::{
HeaderMap, HeaderValue, StatusCode,
header::{
CONNECTION, SEC_WEBSOCKET_ACCEPT, SEC_WEBSOCKET_KEY, SEC_WEBSOCKET_VERSION, UPGRADE,
},
},
response::{IntoResponse as _, Response},
};
use futures::{SinkExt as _, StreamExt as _};
use hyper_util::rt::TokioIo;
use tokio::{net::UnixStream, sync::mpsc};
use tokio_tungstenite::{
WebSocketStream, client_async,
tungstenite::{Message, handshake::derive_accept_key, protocol::Role},
};
use tracing::{debug, trace, warn};
use crate::{
axum::error::{AppError, Result},
server::Server,
};
pub(super) fn is_websocket_upgrade(headers: &HeaderMap<HeaderValue>) -> bool {
let has_upgrade = headers
.get(UPGRADE)
.and_then(|v| v.to_str().ok())
.is_some_and(|v| v.eq_ignore_ascii_case("websocket"));
let has_connection = headers
.get(CONNECTION)
.and_then(|v| v.to_str().ok())
.is_some_and(|v| {
v.split(',')
.any(|part| part.trim().eq_ignore_ascii_case("upgrade"))
});
let has_websocket_key = headers.contains_key(SEC_WEBSOCKET_KEY);
let has_websocket_version = headers.contains_key(SEC_WEBSOCKET_VERSION);
has_upgrade && has_connection && has_websocket_key && has_websocket_version
}
pub(super) async fn handle_websocket_upgrade(
request: Request,
server: Arc<Server>,
) -> Result<Response> {
let Some(ws_key) = request
.headers()
.get(SEC_WEBSOCKET_KEY)
.and_then(|key| key.to_str().ok())
else {
return Ok((StatusCode::BAD_REQUEST, "").into_response());
};
let ws_accept = derive_accept_key(ws_key.as_bytes());
let path_q = request
.uri()
.path_and_query()
.map(|x| x.as_str())
.unwrap_or_default();
let uri = format!("ws://localhost:8000{path_q}");
let mut ws_request =
tokio_tungstenite::tungstenite::handshake::client::Request::builder().uri(uri);
for (k, v) in request.headers() {
ws_request = ws_request.header(k.as_str(), v);
}
let ws_request = ws_request.body(())?;
let response = Response::builder()
.status(StatusCode::SWITCHING_PROTOCOLS)
.header(UPGRADE, "websocket")
.header(CONNECTION, "upgrade")
.header(SEC_WEBSOCKET_ACCEPT, ws_accept)
.body(Body::empty())?;
tokio::spawn(async move {
if let Err(err) = handle_websocket_connection(request, server, ws_request).await {
warn!("WebSocket connection error: {}", err.0);
}
});
Ok(response)
}
async fn handle_websocket_connection(
request: Request,
server: Arc<Server>,
ws_request: tokio_tungstenite::tungstenite::handshake::client::Request,
) -> Result<()> {
let upgraded = hyper::upgrade::on(request).await?;
let io = TokioIo::new(upgraded);
let client_ws = WebSocketStream::from_raw_socket(io, Role::Server, None).await;
let upstream_ws = {
let stream = UnixStream::connect(&server.socket_path).await?;
let (ws_stream, _) = client_async(ws_request, stream).await?;
ws_stream
};
let (mut client_sender, mut client_receiver) = client_ws.split();
let (mut upstream_sender, mut upstream_receiver) = upstream_ws.split();
let (close_tx, mut close_rx) = mpsc::channel::<()>(1);
let close_tx_upstream = close_tx.clone();
let client_to_upstream = tokio::spawn(async move {
let mut client_closed = false;
while let Some(msg) = client_receiver.next().await {
let msg = msg?;
match msg {
Message::Close(_) => {
if !client_closed {
upstream_sender.send(Message::Close(None)).await?;
let _ = close_tx.send(()).await;
client_closed = true;
break;
}
}
msg @ (Message::Binary(_)
| Message::Text(_)
| Message::Ping(_)
| Message::Pong(_)) => {
if !client_closed {
upstream_sender.send(msg).await?;
}
}
Message::Frame(_) => {}
}
}
if !client_closed {
upstream_sender.send(Message::Close(None)).await?;
let _ = close_tx.send(()).await;
}
Ok::<_, AppError>(())
});
let upstream_to_client = tokio::spawn(async move {
let mut upstream_closed = false;
while let Some(msg) = upstream_receiver.next().await {
let msg = msg?;
match msg {
Message::Close(_) => {
if !upstream_closed {
client_sender.send(Message::Close(None)).await?;
let _ = close_tx_upstream.send(()).await;
upstream_closed = true;
break;
}
}
msg @ (Message::Binary(_)
| Message::Text(_)
| Message::Ping(_)
| Message::Pong(_)) => {
if !upstream_closed {
client_sender.send(msg).await?;
}
}
Message::Frame(_) => {}
}
}
if !upstream_closed {
client_sender.send(Message::Close(None)).await?;
let _ = close_tx_upstream.send(()).await;
}
Ok::<_, AppError>(())
});
tokio::select! {
_ = close_rx.recv() => {
trace!("WebSocket connection closed gracefully");
},
res = client_to_upstream => {
if let Err(err) = res {
debug!("Client to upstream task failed: {:?}", err);
}
}
res = upstream_to_client => {
if let Err(err) = res {
debug!("Upstream to client task failed: {:?}", err);
}
}
}
Ok(())
}
}

255
src/server/mod.rs Normal file
View File

@@ -0,0 +1,255 @@
use std::{
env::temp_dir,
os::unix,
path::PathBuf,
process::Stdio,
sync::{
Arc,
atomic::{AtomicBool, Ordering},
},
time::Duration,
};
use arc_swap::ArcSwapOption;
use argh::FromArgs;
use axum::{Router, body::Body, extract::Request, http::status::StatusCode, routing::any};
use eyre::{Result, eyre};
use hyper_unix_socket::UnixSocketConnector;
use hyper_util::{client::legacy::Client, rt::TokioExecutor};
use nix::{
sys::signal::{Signal, kill},
unistd::Pid,
};
use tokio::{
net::UnixStream,
process::{Child, Command},
signal::unix::SignalKind,
sync::{Mutex, broadcast::error::RecvError},
time::Instant,
};
use tower::ServiceExt as _;
use tower_http::timeout::TimeoutLayer;
use tracing::{info, trace, warn};
use crate::{
arbiter::{Arbiter, Tasks},
axum::server,
config,
worker::Workers,
};
pub(super) static GUNICORN_READY: AtomicBool = AtomicBool::new(false);
pub(crate) mod core;
mod r#static;
mod tls;
#[derive(Debug, Default, FromArgs, PartialEq)]
/// Run the authentik server.
#[argh(subcommand, name = "server")]
#[expect(
clippy::empty_structs_with_brackets,
reason = "argh doesn't support unit structs"
)]
pub(super) struct Cli {}
pub(crate) struct Server {
gunicorn: Mutex<Child>,
socket_path: PathBuf,
pub(crate) client: Client<UnixSocketConnector<PathBuf>, Body>,
pub(crate) workers: ArcSwapOption<Workers>,
}
impl Server {
fn new(socket_path: PathBuf) -> Result<Self> {
info!("starting gunicorn");
let gunicorn = Command::new("gunicorn")
.args([
"--bind",
&format!("unix://{}", socket_path.display()),
"-c",
"./lifecycle/gunicorn.conf.py",
"authentik.root.asgi:application",
])
.kill_on_drop(true)
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.spawn()?;
let client = Client::builder(TokioExecutor::new())
.pool_idle_timeout(Duration::from_secs(60))
.set_host(false)
.build(UnixSocketConnector::new(socket_path.clone()));
Ok(Self {
gunicorn: Mutex::new(gunicorn),
socket_path,
client,
workers: ArcSwapOption::empty(),
})
}
async fn shutdown(&self, signal: Signal) -> Result<()> {
trace!(
signal = signal.as_str(),
"sending shutdown signal to gunicorn"
);
let mut gunicorn = self.gunicorn.lock().await;
if let Some(id) = gunicorn.id() {
kill(Pid::from_raw(id.cast_signed()), signal)?;
}
gunicorn.wait().await?;
drop(gunicorn);
Ok(())
}
async fn graceful_shutdown(&self) -> Result<()> {
info!("gracefully shutting down gunicorn");
self.shutdown(Signal::SIGTERM).await
}
async fn fast_shutdown(&self) -> Result<()> {
info!("immediately shutting down gunicorn");
self.shutdown(Signal::SIGINT).await
}
async fn is_alive(&self) -> bool {
let try_wait = self.gunicorn.lock().await.try_wait();
match try_wait {
Ok(Some(code)) => {
warn!("gunicorn has exited with status {code}");
false
}
Ok(None) => true,
Err(err) => {
warn!("failed to check the status of gunicorn process, ignoring: {err}");
true
}
}
}
async fn is_socket_ready(&self) -> bool {
let result = UnixStream::connect(&self.socket_path).await;
trace!("checking if gunicorn is ready: {result:?}");
result.is_ok()
}
}
async fn watch_server(arbiter: Arbiter, server: Arc<Server>) -> Result<()> {
info!("starting server watcher");
let mut signals_rx = arbiter.signals_subscribe();
loop {
tokio::select! {
signal = signals_rx.recv() => {
match signal {
Ok(signal) => {
if signal == SignalKind::user_defined1() {
info!("gunicorn notified us ready, marked ready for operation");
GUNICORN_READY.store(true, Ordering::Relaxed);
arbiter.mark_gunicorn_ready();
}
},
Err(RecvError::Lagged(_)) => {},
Err(RecvError::Closed) => {
warn!("error receiving signals");
return Err(RecvError::Closed.into());
}
}
},
() = tokio::time::sleep(Duration::from_secs(1)), if !GUNICORN_READY.load(Ordering::Relaxed) => {
// On some platforms the SIGUSR1 can be missed.
// Fall back to probing the gunicorn unix socket and mark ready once it accepts connections.
if server.is_socket_ready().await {
info!("gunicorn socket is accepting connections, marked ready for operation");
GUNICORN_READY.store(true, Ordering::Relaxed);
arbiter.mark_gunicorn_ready();
}
},
() = tokio::time::sleep(Duration::from_secs(5)) => {
if !server.is_alive().await {
return Err(eyre!("gunicorn has exited unexpectedly"));
}
},
() = arbiter.fast_shutdown() => {
server.fast_shutdown().await?;
return Ok(());
},
() = arbiter.graceful_shutdown() => {
server.graceful_shutdown().await?;
return Ok(());
},
}
}
}
fn build_router(server: Arc<Server>) -> Router {
let core_router = core::build_router(server);
let proxy_router: Option<Router> = None;
let config = config::get();
let timeout = durstr::parse(&config.web.timeout_http_read_header)
.expect("Invalid duration in http timeout")
+ durstr::parse(&config.web.timeout_http_read).expect("Invalid duration in http timeout")
+ durstr::parse(&config.web.timeout_http_write).expect("Invalid duration in http timeout")
+ durstr::parse(&config.web.timeout_http_idle).expect("Invalid duration in http timeout");
let timeout_layer = TimeoutLayer::with_status_code(StatusCode::REQUEST_TIMEOUT, timeout);
Router::new()
.fallback(any(async |request: Request<Body>| {
metrics::describe_histogram!(
"authentik_main_request_duration",
metrics::Unit::Seconds,
"API request latencies in seconds"
);
let now = Instant::now();
if let Some(proxy_router) = proxy_router
&& crate::proxy::can_handle(&request)
{
let res = proxy_router.oneshot(request).await;
metrics::histogram!("authentik_main_request_duration", "dest" => "embedded_outpost")
.record(now.elapsed());
res
} else {
let res = core_router.oneshot(request).await;
metrics::histogram!("authentik_main_request_duration", "dest" => "core")
.record(now.elapsed());
res
}
}))
.layer(timeout_layer)
}
pub(super) fn run(_cli: Cli, tasks: &mut Tasks) -> Result<Arc<Server>> {
let config = config::get();
let arbiter = tasks.arbiter();
let server = Arc::new(Server::new(temp_dir().join("authentik-gunicorn.sock"))?);
tasks
.build_task()
.name(&format!("{}::watch_server", module_path!()))
.spawn(watch_server(arbiter.clone(), Arc::clone(&server)))?;
let router = build_router(Arc::clone(&server));
for addr in config.listen.http.iter().copied() {
server::start_plain(tasks, "server", router.clone(), addr)?;
}
let tls_config = tls::make_initial_tls_config()?;
for addr in config.listen.https.iter().copied() {
server::start_tls(tasks, "tls", router.clone(), addr, tls_config.clone())?;
}
tasks
.build_task()
.name(&format!("{}::tls::watch_tls_config", module_path!(),))
.spawn(tls::watch_tls_config(arbiter, tls_config))?;
server::start_unix(
tasks,
"server",
router,
unix::net::SocketAddr::from_pathname(temp_dir().join("authentik.sock"))?,
)?;
Ok(server)
}

Some files were not shown because too many files have changed in this diff.