//! Integration tests for ONNX face recognition pipeline
//!
//! These tests require the `onnx` feature to be enabled:
//!
//! ```bash
//! cargo test --features onnx
//! ```
//!
//! Note: Tests that load actual ONNX models are marked with `#[ignore]` by default
//! since they require model files to be present. Run with:
//!
//! ```bash
//! cargo test --features onnx -- --ignored
//! ```

#![cfg(feature = "onnx")]
|
|
|
|
use linux_hello_daemon::onnx::{
|
|
FaceAligner, OnnxEmbeddingExtractor, OnnxFaceDetector, OnnxModelConfig, OnnxPipeline,
|
|
REFERENCE_LANDMARKS_112,
|
|
};
|
|
use linux_hello_daemon::{FaceDetect, EmbeddingExtractor};
|
|
use std::path::Path;
|
|
|
|
/// Model directory path
|
|
const MODEL_DIR: &str = "../models";
|
|
|
|
/// Test image dimensions
|
|
const TEST_WIDTH: u32 = 640;
|
|
const TEST_HEIGHT: u32 = 480;
|
|
|
|
/// Create a synthetic test image with face-like pattern
|
|
/// Build a synthetic grayscale frame containing a crude face-like pattern.
///
/// The buffer is `width * height` bytes, row-major: a mid-gray (128)
/// background, a lighter (180) rectangle covering the central quarter of
/// the frame as the "face", and two dark (60) square patches as "eyes".
fn create_test_image(width: u32, height: u32) -> Vec<u8> {
    let mut pixels = vec![128u8; (width * height) as usize];

    // Face rectangle: centered, half the frame in each dimension.
    let (face_left, face_top) = (width / 4, height / 4);
    let (face_width, face_height) = (width / 2, height / 2);

    for row in face_top..face_top + face_height {
        for col in face_left..face_left + face_width {
            pixels[(row * width + col) as usize] = 180;
        }
    }

    // Eye patches: square blocks (side = face_width / 10) placed at one-third
    // and two-thirds of the face width, a quarter of the way down the face.
    let eye_top = face_top + face_height / 4;
    let eye_cols = [face_left + face_width / 3, face_left + 2 * face_width / 3];
    let eye_side = face_width / 10;

    for dy in 0..eye_side {
        for dx in 0..eye_side {
            for &eye_left in &eye_cols {
                let idx = ((eye_top + dy) * width + eye_left + dx) as usize;
                // Guard against running past the buffer for tiny frames.
                if let Some(px) = pixels.get_mut(idx) {
                    *px = 60;
                }
            }
        }
    }

    pixels
}

// =============================================================================
// Unit Tests (no model files required)
// =============================================================================

mod alignment_tests {
    use super::*;

    #[test]
    fn test_aligner_default_size() {
        // Default aligner targets the standard 112x112 recognition input.
        assert_eq!(FaceAligner::new().output_size(), (112, 112));
    }

    #[test]
    fn test_aligner_custom_size() {
        // A custom output size must be reported back unchanged.
        assert_eq!(FaceAligner::with_size(224, 224).output_size(), (224, 224));
    }

    #[test]
    fn test_align_produces_correct_output_size() {
        let aligner = FaceAligner::new();
        let frame = create_test_image(TEST_WIDTH, TEST_HEIGHT);

        // Fabricated five-point landmarks in pixel coordinates:
        // left eye, right eye, nose tip, left mouth corner, right mouth corner.
        let points = [
            [200.0, 150.0],
            [300.0, 150.0],
            [250.0, 200.0],
            [210.0, 250.0],
            [290.0, 250.0],
        ];

        let aligned = aligner
            .align(&frame, TEST_WIDTH, TEST_HEIGHT, &points)
            .expect("alignment of a valid frame should succeed");
        assert_eq!(aligned.len(), 112 * 112);
    }

    #[test]
    fn test_simple_crop_fallback() {
        let aligner = FaceAligner::new();
        let frame = create_test_image(TEST_WIDTH, TEST_HEIGHT);

        // Crop a 200x200 region at (100, 100); output must match aligner size.
        let cropped = aligner
            .simple_crop(&frame, TEST_WIDTH, TEST_HEIGHT, 100, 100, 200, 200)
            .expect("simple crop of an in-bounds region should succeed");
        assert_eq!(cropped.len(), 112 * 112);
    }

    #[test]
    fn test_reference_landmarks_validity() {
        // Sanity-check the geometric layout of the canonical 112x112 template.
        let lm = &REFERENCE_LANDMARKS_112;

        // Left eye sits to the left of the right eye.
        assert!(lm[0][0] < lm[1][0]);

        // Both eyes at (nearly) the same height.
        assert!((lm[0][1] - lm[1][1]).abs() < 1.0);

        // Nose below the eyes.
        assert!(lm[2][1] > lm[0][1]);

        // Mouth corners below the nose.
        assert!(lm[3][1] > lm[2][1]);
        assert!(lm[4][1] > lm[2][1]);
    }
}
mod config_tests {
    use super::*;

    #[test]
    fn test_default_config() {
        let cfg = OnnxModelConfig::default();
        // Defaults: runtime-chosen thread count (0), CPU-only execution,
        // and standard detection/embedding input resolutions.
        assert_eq!(cfg.num_threads, 0);
        assert!(!cfg.use_gpu);
        assert_eq!(cfg.detection_input_size, (640, 640));
        assert_eq!(cfg.embedding_input_size, (112, 112));
    }

    #[test]
    fn test_fast_config() {
        let cfg = OnnxModelConfig::fast();
        // The fast profile lowers detection resolution and pins 4 threads.
        assert_eq!(cfg.detection_input_size, (320, 320));
        assert_eq!(cfg.num_threads, 4);
    }

    #[test]
    fn test_accurate_config() {
        // The accurate profile keeps the full 640x640 detection input.
        assert_eq!(OnnxModelConfig::accurate().detection_input_size, (640, 640));
    }
}
mod detector_tests {
    use super::*;

    #[test]
    fn test_detector_stub_without_model() {
        // Loading a nonexistent model file may fail immediately, or may yield
        // a detector that errors on first use — both outcomes are acceptable.
        if let Ok(detector) = OnnxFaceDetector::load("nonexistent.onnx") {
            let frame = create_test_image(TEST_WIDTH, TEST_HEIGHT);
            let outcome = detector.detect(&frame, TEST_WIDTH, TEST_HEIGHT);
            // No real model was loaded, so detection must report an error.
            assert!(outcome.is_err());
        }
    }

    #[test]
    fn test_detector_input_size_accessors() {
        // If a detector can be constructed at all, its reported input
        // dimensions must be non-zero.
        if let Ok(detector) = OnnxFaceDetector::load("test.onnx") {
            let (w, h) = detector.input_size();
            assert!(w > 0);
            assert!(h > 0);
        }
    }
}
mod embedding_tests {
|
|
use super::*;
|
|
use image::GrayImage;
|
|
|
|
#[test]
|
|
fn test_embedding_stub_without_model() {
|
|
let extractor = OnnxEmbeddingExtractor::load("nonexistent.onnx");
|
|
|
|
if let Ok(ext) = extractor {
|
|
let face = GrayImage::from_raw(112, 112, vec![128u8; 112 * 112]).unwrap();
|
|
let result = ext.extract(&face);
|
|
// Should fail because model not actually loaded
|
|
assert!(result.is_err());
|
|
}
|
|
}
|
|
|
|
#[test]
|
|
fn test_embedding_dimension() {
|
|
if let Ok(extractor) = OnnxEmbeddingExtractor::load("test.onnx") {
|
|
assert!(extractor.embedding_dimension() > 0);
|
|
}
|
|
}
|
|
}

// =============================================================================
// Integration Tests (require model files)
// =============================================================================

mod integration_with_models {
|
|
use super::*;
|
|
|
|
fn model_path(name: &str) -> String {
|
|
format!("{}/{}", MODEL_DIR, name)
|
|
}
|
|
|
|
fn models_available() -> bool {
|
|
Path::new(&model_path("retinaface.onnx")).exists()
|
|
&& Path::new(&model_path("mobilefacenet.onnx")).exists()
|
|
}
|
|
|
|
#[test]
|
|
#[ignore = "Requires ONNX model files to be present"]
|
|
fn test_load_detection_model() {
|
|
if !models_available() {
|
|
eprintln!("Skipping: model files not found");
|
|
return;
|
|
}
|
|
|
|
let result = OnnxFaceDetector::load(model_path("retinaface.onnx"));
|
|
assert!(result.is_ok(), "Failed to load detection model: {:?}", result.err());
|
|
}
|
|
|
|
#[test]
|
|
#[ignore = "Requires ONNX model files to be present"]
|
|
fn test_load_embedding_model() {
|
|
if !models_available() {
|
|
eprintln!("Skipping: model files not found");
|
|
return;
|
|
}
|
|
|
|
let result = OnnxEmbeddingExtractor::load(model_path("mobilefacenet.onnx"));
|
|
assert!(result.is_ok(), "Failed to load embedding model: {:?}", result.err());
|
|
}
|
|
|
|
#[test]
|
|
#[ignore = "Requires ONNX model files to be present"]
|
|
fn test_detection_on_synthetic_image() {
|
|
if !models_available() {
|
|
eprintln!("Skipping: model files not found");
|
|
return;
|
|
}
|
|
|
|
let detector = OnnxFaceDetector::load(model_path("retinaface.onnx"))
|
|
.expect("Failed to load detector");
|
|
|
|
let image = create_test_image(TEST_WIDTH, TEST_HEIGHT);
|
|
let detections = detector.detect(&image, TEST_WIDTH, TEST_HEIGHT);
|
|
|
|
assert!(detections.is_ok(), "Detection failed: {:?}", detections.err());
|
|
// Note: synthetic image may or may not trigger detections
|
|
}
|
|
|
|
#[test]
|
|
#[ignore = "Requires ONNX model files to be present"]
|
|
fn test_embedding_extraction() {
|
|
if !models_available() {
|
|
eprintln!("Skipping: model files not found");
|
|
return;
|
|
}
|
|
|
|
let extractor = OnnxEmbeddingExtractor::load(model_path("mobilefacenet.onnx"))
|
|
.expect("Failed to load extractor");
|
|
|
|
// Create aligned face image
|
|
let face_data = vec![128u8; 112 * 112];
|
|
let result = extractor.extract_from_bytes(&face_data, 112, 112);
|
|
|
|
assert!(result.is_ok(), "Embedding extraction failed: {:?}", result.err());
|
|
|
|
let embedding = result.unwrap();
|
|
assert_eq!(embedding.len(), extractor.embedding_dimension());
|
|
|
|
// Check embedding is normalized (L2 norm should be ~1)
|
|
let norm: f32 = embedding.iter().map(|x| x * x).sum::<f32>().sqrt();
|
|
assert!((norm - 1.0).abs() < 0.1, "Embedding not normalized: norm = {}", norm);
|
|
}
|
|
|
|
#[test]
|
|
#[ignore = "Requires ONNX model files to be present"]
|
|
fn test_full_pipeline() {
|
|
if !models_available() {
|
|
eprintln!("Skipping: model files not found");
|
|
return;
|
|
}
|
|
|
|
let pipeline = OnnxPipeline::load(
|
|
model_path("retinaface.onnx"),
|
|
model_path("mobilefacenet.onnx"),
|
|
)
|
|
.expect("Failed to load pipeline");
|
|
|
|
let image = create_test_image(TEST_WIDTH, TEST_HEIGHT);
|
|
let results = pipeline.process_frame(&image, TEST_WIDTH, TEST_HEIGHT);
|
|
|
|
assert!(results.is_ok(), "Pipeline processing failed: {:?}", results.err());
|
|
}
|
|
|
|
#[test]
|
|
#[ignore = "Requires ONNX model files to be present"]
|
|
fn test_embedding_consistency() {
|
|
if !models_available() {
|
|
eprintln!("Skipping: model files not found");
|
|
return;
|
|
}
|
|
|
|
let extractor = OnnxEmbeddingExtractor::load(model_path("mobilefacenet.onnx"))
|
|
.expect("Failed to load extractor");
|
|
|
|
// Same face should produce similar embeddings
|
|
let face_data = vec![128u8; 112 * 112];
|
|
|
|
let embedding1 = extractor.extract_from_bytes(&face_data, 112, 112)
|
|
.expect("First extraction failed");
|
|
let embedding2 = extractor.extract_from_bytes(&face_data, 112, 112)
|
|
.expect("Second extraction failed");
|
|
|
|
// Compute cosine similarity
|
|
let dot: f32 = embedding1.iter().zip(&embedding2).map(|(a, b)| a * b).sum();
|
|
let norm1: f32 = embedding1.iter().map(|x| x * x).sum::<f32>().sqrt();
|
|
let norm2: f32 = embedding2.iter().map(|x| x * x).sum::<f32>().sqrt();
|
|
let similarity = dot / (norm1 * norm2);
|
|
|
|
// Same input should give identical output (similarity = 1.0)
|
|
assert!(
|
|
(similarity - 1.0).abs() < 0.001,
|
|
"Same input gave different embeddings: similarity = {}",
|
|
similarity
|
|
);
|
|
}
|
|
|
|
#[test]
|
|
#[ignore = "Requires ONNX model files to be present"]
|
|
fn test_different_faces_produce_different_embeddings() {
|
|
if !models_available() {
|
|
eprintln!("Skipping: model files not found");
|
|
return;
|
|
}
|
|
|
|
let extractor = OnnxEmbeddingExtractor::load(model_path("mobilefacenet.onnx"))
|
|
.expect("Failed to load extractor");
|
|
|
|
// Two different "faces"
|
|
let face1 = vec![100u8; 112 * 112];
|
|
let face2 = vec![200u8; 112 * 112];
|
|
|
|
let embedding1 = extractor.extract_from_bytes(&face1, 112, 112)
|
|
.expect("First extraction failed");
|
|
let embedding2 = extractor.extract_from_bytes(&face2, 112, 112)
|
|
.expect("Second extraction failed");
|
|
|
|
// Compute cosine similarity
|
|
let dot: f32 = embedding1.iter().zip(&embedding2).map(|(a, b)| a * b).sum();
|
|
let norm1: f32 = embedding1.iter().map(|x| x * x).sum::<f32>().sqrt();
|
|
let norm2: f32 = embedding2.iter().map(|x| x * x).sum::<f32>().sqrt();
|
|
let similarity = dot / (norm1 * norm2);
|
|
|
|
// Different inputs should produce different embeddings
|
|
assert!(
|
|
similarity < 0.99,
|
|
"Different inputs gave too similar embeddings: similarity = {}",
|
|
similarity
|
|
);
|
|
}
|
|
}

// =============================================================================
// Benchmark-style tests (optional, for performance tracking)
// =============================================================================

#[cfg(test)]
mod benchmarks {
    use super::*;
    use std::time::Instant;

    /// Run `op` `iterations` times and return the mean wall-clock time in ms.
    fn mean_ms(iterations: u32, mut op: impl FnMut()) -> f64 {
        let started = Instant::now();
        for _ in 0..iterations {
            op();
        }
        started.elapsed().as_millis() as f64 / iterations as f64
    }

    #[test]
    fn test_alignment_performance() {
        let aligner = FaceAligner::new();
        let frame = create_test_image(TEST_WIDTH, TEST_HEIGHT);
        // Five-point landmarks: eyes, nose, mouth corners.
        let points = [
            [200.0, 150.0],
            [300.0, 150.0],
            [250.0, 200.0],
            [210.0, 250.0],
            [290.0, 250.0],
        ];

        let avg_ms = mean_ms(100, || {
            let _ = aligner.align(&frame, TEST_WIDTH, TEST_HEIGHT, &points);
        });

        println!("Face alignment: {:.2}ms per iteration", avg_ms);
        // Generous ceiling so the check doesn't flake on slow CI machines.
        assert!(avg_ms < 50.0, "Alignment too slow: {}ms", avg_ms);
    }

    #[test]
    fn test_simple_crop_performance() {
        let aligner = FaceAligner::new();
        let frame = create_test_image(TEST_WIDTH, TEST_HEIGHT);

        let avg_ms = mean_ms(100, || {
            let _ = aligner.simple_crop(&frame, TEST_WIDTH, TEST_HEIGHT, 100, 100, 200, 200);
        });

        println!("Simple crop: {:.2}ms per iteration", avg_ms);
        // Cropping is cheaper than full alignment, so the bound is tighter.
        assert!(avg_ms < 20.0, "Simple crop too slow: {}ms", avg_ms);
    }
}