Files
Linux-Hello/linux-hello-daemon/src/detection/mod.rs
eliott 8c478836d8 feat: ONNX face detection, IR camera support, and PAM authentication
Wire up ONNX RetinaFace detector and MobileFaceNet embeddings in the CLI
and auth pipeline. Add IR camera detection for Windows Hello-style
"Integrated I" cameras and greyscale-only format heuristic. Add histogram
normalization for underexposed IR frames from low-power emitters.

- Add `onnx` feature flag to CLI crate forwarding to daemon
- Wire ONNX detector into `detect` command with fallback to simple detector
- Fix IR camera detection for Chicony "Integrated I" naming pattern
- Add `normalize_if_dark()` for underexposed IR frames in auth pipeline
- Load user config from ~/.config/linux-hello/ as fallback
- Update systemd service for IR emitter integration and camera access
- Add system installation script and ONNX runtime installer
- Update .gitignore for local dev artifacts

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-02 15:04:52 +02:00

270 lines
8.4 KiB
Rust

//! Face Detection Module
//!
//! This module provides face detection functionality for the authentication pipeline.
//! It includes types for representing detected faces and traits for implementing
//! different detection backends.
//!
//! # Overview
//!
//! Face detection is the first ML step in the authentication pipeline. It locates
//! faces in camera frames and provides bounding boxes for subsequent processing.
//!
//! # Coordinate System
//!
//! All coordinates are normalized to the range [0, 1] for resolution independence:
//! - (0, 0) is the top-left corner
//! - (1, 1) is the bottom-right corner
//! - Use [`FaceDetection::to_pixels`] to convert to pixel coordinates
//!
//! # Detection Backends
//!
//! The module supports multiple detection backends via the [`FaceDetect`] trait:
//!
//! - [`SimpleFaceDetector`] - Basic detection for testing (no ML model required)
//! - ONNX-based detectors (planned) - BlazeFace, MTCNN, RetinaFace
//!
//! # Example
//!
//! ```rust
//! use linux_hello_daemon::{FaceDetection, FaceDetect, SimpleFaceDetector};
//!
//! // Create a detector
//! let detector = SimpleFaceDetector::new(0.5);
//!
//! // Detect faces in a grayscale image
//! let image = vec![128u8; 640 * 480];
//! let detections = detector.detect(&image, 640, 480).unwrap();
//!
//! for face in &detections {
//!     let (x, y, w, h) = face.to_pixels(640, 480);
//!     println!("Face at ({}, {}) size {}x{}, confidence: {:.2}",
//!              x, y, w, h, face.confidence);
//! }
//! ```
use linux_hello_common::Result;
/// A face located in a frame, expressed in resolution-independent units.
///
/// All fields live in the normalized [0, 1] range so detections are
/// independent of the capture resolution. Call
/// [`to_pixels`](Self::to_pixels) when concrete pixel values are needed.
///
/// # Example
///
/// ```rust
/// use linux_hello_daemon::FaceDetection;
///
/// let detection = FaceDetection {
///     x: 0.25,      // 25% in from the left edge
///     y: 0.1,       // 10% down from the top edge
///     width: 0.5,   // spans half the image width
///     height: 0.8,  // spans 80% of the image height
///     confidence: 0.95,
/// };
///
/// // Convert to 640x480 pixel coordinates
/// assert_eq!(detection.to_pixels(640, 480), (160, 48, 320, 384));
/// ```
#[derive(Debug, Clone)]
#[allow(dead_code)] // Public API, fields used by detection methods
pub struct FaceDetection {
    /// Left edge of the bounding box (0.0-1.0 normalized).
    pub x: f32,
    /// Top edge of the bounding box (0.0-1.0 normalized).
    pub y: f32,
    /// Bounding-box width as a fraction of the image width (0.0-1.0).
    pub width: f32,
    /// Bounding-box height as a fraction of the image height (0.0-1.0).
    pub height: f32,
    /// Detector confidence score (0.0-1.0); larger means more certain.
    pub confidence: f32,
}

impl FaceDetection {
    /// Scale the normalized bounding box to pixel coordinates.
    ///
    /// Each normalized value is multiplied by the matching image dimension
    /// and truncated toward zero by the `as u32` cast.
    ///
    /// # Arguments
    ///
    /// * `img_width` - Width of the image in pixels
    /// * `img_height` - Height of the image in pixels
    ///
    /// # Returns
    ///
    /// A `(x, y, width, height)` tuple in pixel coordinates.
    ///
    /// # Example
    ///
    /// ```rust
    /// use linux_hello_daemon::FaceDetection;
    ///
    /// let det = FaceDetection {
    ///     x: 0.5, y: 0.5, width: 0.25, height: 0.25, confidence: 0.9,
    /// };
    /// assert_eq!(det.to_pixels(100, 100), (50, 50, 25, 25));
    /// ```
    #[allow(dead_code)] // Public API, used by auth service
    pub fn to_pixels(&self, img_width: u32, img_height: u32) -> (u32, u32, u32, u32) {
        // One scaling rule applied to all four fields; x/width scale by the
        // image width, y/height by the image height.
        let scale = |value: f32, dim: u32| (value * dim as f32) as u32;
        (
            scale(self.x, img_width),
            scale(self.y, img_height),
            scale(self.width, img_width),
            scale(self.height, img_height),
        )
    }
}
/// Trait for face detection backends.
///
/// Implement this trait to add support for different face detection models
/// or algorithms. All implementations should return normalized coordinates
/// (see [`FaceDetection`]) so callers stay resolution-independent.
///
/// # Implementing a Custom Detector
///
/// ```rust,ignore
/// use linux_hello_daemon::{FaceDetect, FaceDetection};
/// use linux_hello_common::Result;
///
/// struct MyDetector {
///     model: OnnxModel,
/// }
///
/// impl FaceDetect for MyDetector {
///     fn detect(&self, image_data: &[u8], width: u32, height: u32) -> Result<Vec<FaceDetection>> {
///         // Run model inference and return detections
///         let detections = self.model.run(image_data, width, height)?;
///         Ok(detections)
///     }
/// }
/// ```
#[allow(dead_code)] // Public API trait
pub trait FaceDetect {
    /// Detect faces in a grayscale image.
    ///
    /// # Arguments
    ///
    /// * `image_data` - Raw grayscale pixel data (one byte per pixel).
    ///   Callers are expected to pass `width * height` bytes; the trait does
    ///   not mandate validation — NOTE(review): implementations in this file
    ///   do not check the length, confirm before relying on it.
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    ///
    /// # Returns
    ///
    /// A vector of detected faces with normalized coordinates and confidence scores.
    /// Returns an empty vector if no faces are detected.
    fn detect(&self, image_data: &[u8], width: u32, height: u32) -> Result<Vec<FaceDetection>>;
}
/// Simple face detection using basic image analysis.
///
/// Placeholder implementation that assumes a single centered face whenever
/// the frame's mean brightness is plausible. Intended for testing only —
/// not for production use.
///
/// # Algorithm
///
/// Computes the mean pixel value (integer division, truncating) and, when it
/// falls strictly between 30 and 225, returns a fixed centered bounding box
/// with confidence 0.5. Returns `None` for empty input or frames that are
/// nearly all-black / all-white.
#[allow(dead_code)] // Public API, used by lib but not bin
pub fn detect_face_simple(image_data: &[u8], _width: u32, _height: u32) -> Option<FaceDetection> {
    // Guard against empty frames (also avoids a divide-by-zero below).
    if image_data.is_empty() {
        return None;
    }

    // Mean brightness over the whole frame; u64 accumulator cannot overflow
    // for any realistic frame size (255 * len fits easily).
    let total: u64 = image_data.iter().copied().map(u64::from).sum();
    let mean = total / image_data.len() as u64;

    // Accept only frames with plausible contrast (mean strictly in (30, 225)),
    // then hand back the fixed centered-face placeholder.
    (31..225).contains(&mean).then(|| FaceDetection {
        x: 0.25,
        y: 0.15,
        width: 0.5,
        height: 0.7,
        confidence: 0.5, // Low confidence: this is a heuristic, not a model
    })
}
/// Placeholder face detector for testing (no ML model required).
///
/// Wraps the centered-face heuristic behind the [`FaceDetect`] interface and
/// drops detections whose confidence falls below the configured threshold.
#[allow(dead_code)] // Public API for testing
pub struct SimpleFaceDetector {
    // Minimum confidence a detection must reach to be reported.
    confidence_threshold: f32,
}

impl SimpleFaceDetector {
    /// Create a new simple face detector.
    ///
    /// `confidence_threshold` is the minimum confidence (0.0-1.0) a detection
    /// must reach to be returned by [`FaceDetect::detect`].
    ///
    /// Public API - used for testing and placeholder implementation.
    #[allow(dead_code)] // Public API, used in tests
    pub fn new(confidence_threshold: f32) -> Self {
        SimpleFaceDetector { confidence_threshold }
    }
}
impl FaceDetect for SimpleFaceDetector {
    /// Run the placeholder heuristic and keep the detection only when its
    /// confidence meets the configured threshold. Never errors.
    fn detect(&self, image_data: &[u8], width: u32, height: u32) -> Result<Vec<FaceDetection>> {
        // At most one candidate; filter it by threshold and collect the
        // surviving Option into a zero- or one-element vector.
        let candidate = detect_face_simple(image_data, width, height)
            .filter(|face| face.confidence >= self.confidence_threshold);
        Ok(candidate.into_iter().collect())
    }
}
// TODO: Add OnnxFaceDetector once ort API stabilizes
// The ort 2.0.0-rc API has breaking changes from 1.x
// Will implement proper ONNX detection in Phase 2
#[cfg(test)]
mod tests {
    use super::*;

    /// `to_pixels` scales normalized coordinates by the image dimensions
    /// (truncating cast).
    #[test]
    fn test_face_detection_pixels() {
        let detection = FaceDetection {
            x: 0.25,
            y: 0.1,
            width: 0.5,
            height: 0.8,
            confidence: 0.95,
        };
        let (x, y, w, h) = detection.to_pixels(640, 480);
        assert_eq!((x, y, w, h), (160, 48, 320, 384));
    }

    /// A gradient image has a mean well inside the (30, 225) window, so the
    /// simple heuristic should report a face.
    #[test]
    fn test_simple_detection() {
        let width = 100;
        let height = 100;
        let image: Vec<u8> = (0..width * height)
            .map(|i| ((i % 256) as u8).wrapping_add(50))
            .collect();
        let detection = detect_face_simple(&image, width as u32, height as u32);
        assert!(detection.is_some());
    }

    /// Frames outside the contrast window — and empty frames — must yield no
    /// detection (covers the `None` paths, including the divide-by-zero guard).
    #[test]
    fn test_simple_detection_rejects_extremes() {
        // Near-black frame: mean 10 <= 30.
        assert!(detect_face_simple(&[10u8; 100], 10, 10).is_none());
        // Near-white frame: mean 250 >= 225.
        assert!(detect_face_simple(&[250u8; 100], 10, 10).is_none());
        // Empty input must not panic.
        assert!(detect_face_simple(&[], 0, 0).is_none());
    }

    /// With a threshold below the heuristic's fixed 0.5 confidence, the
    /// detector reports exactly one face.
    #[test]
    fn test_simple_detector_trait() {
        let detector = SimpleFaceDetector::new(0.3);
        let image: Vec<u8> = vec![128; 100 * 100]; // Mid-gray image
        let detections = detector.detect(&image, 100, 100).unwrap();
        assert_eq!(detections.len(), 1);
    }

    /// A threshold above the heuristic's fixed 0.5 confidence must filter the
    /// candidate out, returning an empty vector.
    #[test]
    fn test_detector_threshold_filters_detection() {
        let detector = SimpleFaceDetector::new(0.9);
        let image: Vec<u8> = vec![128; 64 * 64]; // Mid-gray image
        let detections = detector.detect(&image, 64, 64).unwrap();
        assert!(detections.is_empty());
    }
}