refactor: remove unnecessary wrapping of values in a Result

This simplifies the code and removes some unwraps.
warning: this function's return value is unnecessarily wrapped by `Result`
help: for further information visit https://rust-lang.github.io/rust-clippy/rust-1.93.0/index.html#unnecessary_wraps
This commit is contained in:
Gwen Lg
2026-02-02 17:34:32 +01:00
committed by Alex
parent b060a7e0f1
commit 83f8bdbacd
7 changed files with 33 additions and 35 deletions

View File

@@ -186,6 +186,7 @@ doc_markdown = "warn"
manual_midpoint = "warn"
semicolon_if_nothing_returned = "warn"
unnecessary_semicolon = "warn"
unnecessary_wraps = "warn"
# nursery lints configuration
# or_fun_call = "warn" # enable it to help detect non trivial code used in `_or` method

View File

@@ -126,7 +126,7 @@ fn handle_http_precondition(
) -> Result<Option<Response<ResBody>>, Error> {
let precondition_headers = PreconditionHeaders::parse(req)?;
if let Some(status_code) = precondition_headers.check(version, &version_meta.etag)? {
if let Some(status_code) = precondition_headers.check(version, &version_meta.etag) {
let mut response = object_headers(
version,
version_meta,
@@ -877,7 +877,7 @@ impl PreconditionHeaders {
})
}
fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
fn check(&self, v: &ObjectVersion, etag: &str) -> Option<StatusCode> {
// we store date with ms precision, but headers are precise to the second: truncate
// the timestamp to handle the same-second edge case
let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000);
@@ -887,32 +887,32 @@ impl PreconditionHeaders {
if let Some(im) = &self.if_match {
// Step 1: if-match is present
if !im.iter().any(|x| x == etag || x == "*") {
return Ok(Some(StatusCode::PRECONDITION_FAILED));
return Some(StatusCode::PRECONDITION_FAILED);
}
} else if let Some(ius) = &self.if_unmodified_since {
// Step 2: if-unmodified-since is present, and if-match is absent
if v_date > *ius {
return Ok(Some(StatusCode::PRECONDITION_FAILED));
return Some(StatusCode::PRECONDITION_FAILED);
}
}
if let Some(inm) = &self.if_none_match {
// Step 3: if-none-match is present
if inm.iter().any(|x| x == etag || x == "*") {
return Ok(Some(StatusCode::NOT_MODIFIED));
return Some(StatusCode::NOT_MODIFIED);
}
} else if let Some(ims) = &self.if_modified_since {
// Step 4: if-modified-since is present, and if-none-match is absent
if v_date <= *ims {
return Ok(Some(StatusCode::NOT_MODIFIED));
return Some(StatusCode::NOT_MODIFIED);
}
}
Ok(None)
None
}
pub(crate) fn check_copy_source(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
match self.check(v, etag)? {
match self.check(v, etag) {
Some(_) => Err(Error::PreconditionFailed),
None => Ok(()),
}

View File

@@ -296,7 +296,7 @@ pub async fn handle_list_parts(
},
);
let (info, next) = fetch_part_info(query, &mpu)?;
let (info, next) = fetch_part_info(query, &mpu);
let result = s3_xml::ListPartsResult {
xmlns: (),
@@ -526,7 +526,7 @@ where
fn fetch_part_info<'a>(
query: &ListPartsQuery,
mpu: &'a MultipartUpload,
) -> Result<(Vec<PartInfo<'a>>, Option<u64>), Error> {
) -> (Vec<PartInfo<'a>>, Option<u64>) {
assert!((1..=1000).contains(&query.max_parts)); // see s3/api_server.rs
// Parse multipart upload part list, removing parts not yet finished
@@ -565,10 +565,10 @@ fn fetch_part_info<'a>(
if parts.len() > query.max_parts as usize {
parts.truncate(query.max_parts as usize);
let pagination = Some(parts.last().unwrap().part_number);
return Ok((parts, pagination));
return (parts, pagination);
}
Ok((parts, None))
(parts, None)
}
/*
@@ -1255,7 +1255,7 @@ mod tests {
}
#[test]
fn test_fetch_part_info() -> Result<(), Error> {
fn test_fetch_part_info() {
let mut query = ListPartsQuery {
bucket_name: "a".to_string(),
key: "a".to_string(),
@@ -1267,7 +1267,7 @@ mod tests {
let mpu = mpu();
// Start from the beginning but with limited size to trigger pagination
let (info, pagination) = fetch_part_info(&query, &mpu)?;
let (info, pagination) = fetch_part_info(&query, &mpu);
assert_eq!(pagination.unwrap(), 3);
assert_eq!(
info,
@@ -1291,7 +1291,7 @@ mod tests {
// Use previous pagination to make a new request
query.part_number_marker = Some(pagination.unwrap());
let (info, pagination) = fetch_part_info(&query, &mpu)?;
let (info, pagination) = fetch_part_info(&query, &mpu);
assert!(pagination.is_none());
assert_eq!(
info,
@@ -1315,14 +1315,14 @@ mod tests {
// Trying to access a part that is way larger than registered ones
query.part_number_marker = Some(9999);
let (info, pagination) = fetch_part_info(&query, &mpu)?;
let (info, pagination) = fetch_part_info(&query, &mpu);
assert!(pagination.is_none());
assert_eq!(info, vec![]);
// Try without any limitation
query.max_parts = 1000;
query.part_number_marker = None;
let (info, pagination) = fetch_part_info(&query, &mpu)?;
let (info, pagination) = fetch_part_info(&query, &mpu);
assert!(pagination.is_none());
assert_eq!(
info,
@@ -1357,7 +1357,5 @@ mod tests {
},
]
);
Ok(())
}
}

View File

@@ -5,6 +5,7 @@ mod telemetry {
use garage_util::data::Uuid;
use garage_util::error::Error;
#[expect(clippy::unnecessary_wraps)]
pub fn init_tracing(_: &str, _: Uuid) -> Result<(), Error> {
error!("Garage was built without OTLP exporter, admin.trace_sink is ignored.");
Ok(())

View File

@@ -2,7 +2,6 @@ use std::cmp::min;
use std::collections::HashMap;
use garage_util::crdt::Crdt;
use garage_util::error::*;
use crate::layout::*;
use crate::replication_mode::ReplicationFactor;
@@ -21,14 +20,14 @@ use crate::replication_mode::ReplicationFactor;
// number of tokens by zone : (A, 4), (B,1), (C,4), (D, 4), (E, 2)
// With these parameters, the naive algo fails, whereas there is a solution:
// (A,A,C,D,E) , (A,B,C,D,D) (A,C,C,D,E)
fn check_against_naive(cl: &LayoutVersion) -> Result<bool, Error> {
fn check_against_naive(cl: &LayoutVersion) -> bool {
let over_size = cl.partition_size + 1;
let mut zone_token = HashMap::<String, usize>::new();
let (zones, zone_to_id) = cl.generate_nongateway_zone_ids()?;
let (zones, zone_to_id) = cl.generate_nongateway_zone_ids();
if zones.is_empty() {
return Ok(false);
return false;
}
for z in zones.iter() {
@@ -66,7 +65,7 @@ fn check_against_naive(cl: &LayoutVersion) -> Result<bool, Error> {
{
curr_zone += 1;
if curr_zone >= zones.len() {
return Ok(true);
return true;
}
}
id_zone_token[curr_zone] -= 1;
@@ -77,7 +76,7 @@ fn check_against_naive(cl: &LayoutVersion) -> Result<bool, Error> {
}
}
Ok(false)
false
}
fn show_msg(msg: &Message) {
@@ -127,7 +126,7 @@ fn test_assignment() {
let (mut cl, msg) = cl.apply_staged_changes(v + 1).unwrap();
show_msg(&msg);
assert_eq!(cl.check(), Ok(()));
assert!(check_against_naive(cl.current()).unwrap());
assert!(check_against_naive(cl.current()));
node_capacity_vec = vec![4000, 1000, 1000, 3000, 1000, 1000, 2000, 10000, 2000];
node_zone_vec = vec!["A", "B", "C", "C", "C", "B", "G", "H", "I"];
@@ -136,7 +135,7 @@ fn test_assignment() {
let (mut cl, msg) = cl.apply_staged_changes(v + 1).unwrap();
show_msg(&msg);
assert_eq!(cl.check(), Ok(()));
assert!(check_against_naive(cl.current()).unwrap());
assert!(check_against_naive(cl.current()));
node_capacity_vec = vec![4000, 1000, 2000, 7000, 1000, 1000, 2000, 10000, 2000];
update_layout(&mut cl, &node_capacity_vec, &node_zone_vec, 3);
@@ -144,7 +143,7 @@ fn test_assignment() {
let (mut cl, msg) = cl.apply_staged_changes(v + 1).unwrap();
show_msg(&msg);
assert_eq!(cl.check(), Ok(()));
assert!(check_against_naive(cl.current()).unwrap());
assert!(check_against_naive(cl.current()));
node_capacity_vec = vec![
4000000, 4000000, 2000000, 7000000, 1000000, 9000000, 2000000, 10000, 2000000,
@@ -154,5 +153,5 @@ fn test_assignment() {
let (cl, msg) = cl.apply_staged_changes(v + 1).unwrap();
show_msg(&msg);
assert_eq!(cl.check(), Ok(()));
assert!(check_against_naive(cl.current()).unwrap());
assert!(check_against_naive(cl.current()));
}

View File

@@ -271,7 +271,7 @@ impl LayoutVersion {
// Check that the partition size stored is the one computed by the assignment
// algorithm.
let cl2 = self.clone();
let (_, zone_to_id) = cl2.generate_nongateway_zone_ids().unwrap();
let (_, zone_to_id) = cl2.generate_nongateway_zone_ids();
match cl2.compute_optimal_partition_size(&zone_to_id, zone_redundancy) {
Ok(s) if s != self.partition_size => {
return Err(format!(
@@ -330,7 +330,7 @@ impl LayoutVersion {
// We generate for once numerical ids for the zones of non gateway nodes,
// to use them as indices in the flow graphs.
let (id_to_zone, zone_to_id) = self.generate_nongateway_zone_ids()?;
let (id_to_zone, zone_to_id) = self.generate_nongateway_zone_ids();
if self.nongateway_nodes().len() < self.replication_factor {
return Err(Error::Message(format!(
@@ -489,9 +489,7 @@ impl LayoutVersion {
/// This function generates ids for the zone of the nodes appearing in
/// `self.node_id_vec`.
pub(crate) fn generate_nongateway_zone_ids(
&self,
) -> Result<(Vec<String>, HashMap<String, usize>), Error> {
pub(crate) fn generate_nongateway_zone_ids(&self) -> (Vec<String>, HashMap<String, usize>) {
let mut id_to_zone = Vec::<String>::new();
let mut zone_to_id = HashMap::<String, usize>::new();
@@ -502,7 +500,7 @@ impl LayoutVersion {
id_to_zone.push(r.zone.clone());
}
}
Ok((id_to_zone, zone_to_id))
(id_to_zone, zone_to_id)
}
/// This function computes by dichotomy the largest realizable partition size, given

View File

@@ -305,6 +305,7 @@ fn default_consistency_mode() -> String {
"consistent".into()
}
#[expect(clippy::unnecessary_wraps)]
fn default_compression() -> Option<i32> {
Some(1)
}