net: Refactor obtain_response and split devtools (#44271)

This PR refactors parts of the net crate with one minor functional
change.
Most of the main functions in the net crate are quite long and rather
unwieldy. This PR tries to make them easier to understand.

- Split parts of obtain_response to have the Router callback setup in
another function.
- Move functions related to devtools into another file.
- Add some servo_tracing.
- http_network_or_cache_fetch has another function for append_cache_data
to headers.
- One functional change: previously in obtain_response, we built the
encoded_url via copies and multiple replace calls. We now use the
percent_encoding crate, which is already re-exported through
content_security_policy, to do this a bit more efficiently. In practice
the compiler probably elided the multiple copies, but this is more
straightforward. The output should be identical.

Testing: As this is mostly a refactor, compilation is the test. The
percent_encoding change is covered by multiple unit tests.

---------

Signed-off-by: Narfinger <Narfinger@users.noreply.github.com>
This commit is contained in:
Narfinger
2026-04-17 18:29:19 +02:00
committed by GitHub
parent 9259a7ed69
commit 68ca2808ee
7 changed files with 537 additions and 481 deletions

2
Cargo.lock generated
View File

@@ -8486,6 +8486,7 @@ dependencies = [
"servo-paint-api",
"servo-pixels",
"servo-profile-traits",
"servo-tracing",
"servo-url",
"servo_arc",
"sha2",
@@ -8495,6 +8496,7 @@ dependencies = [
"tokio-stream",
"tokio-util",
"tower",
"tracing",
"tungstenite",
"url",
"uuid",

View File

@@ -18,6 +18,7 @@ doctest = false
[features]
test-util = ["hyper-util/server-graceful", "dep:servo-default-resources"]
tracing = ["dep:tracing"]
[dependencies]
async-compression = { version = "0.4.12", default-features = false, features = ["brotli", "gzip", "tokio", "zlib", "zstd"] }
@@ -70,8 +71,10 @@ servo_arc = { workspace = true }
servo-base = { workspace = true }
servo-config = { workspace = true }
servo-default-resources = { workspace = true, optional = true }
servo-tracing = { workspace = true }
servo-url = { workspace = true }
sha2 = { workspace = true }
tracing = { workspace = true, optional = true }
time = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread", "sync"] }
tokio-rustls = { workspace = true }

195
components/net/devtools.rs Normal file
View File

@@ -0,0 +1,195 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use crossbeam_channel::Sender;
use devtools_traits::{
ChromeToDevtoolsControlMsg, DevtoolsControlMsg, HttpRequest as DevtoolsHttpRequest,
HttpResponse as DevtoolsHttpResponse, NetworkEvent, SecurityInfoUpdate,
};
use http::{HeaderMap, Method};
use hyper_serde::Serde;
use log::error;
use net_traits::http_status::HttpStatus;
use net_traits::request::{Destination, Request};
use net_traits::response::{CacheState, Response};
use net_traits::{DebugVec, FetchMetadata};
use servo_base::id::{BrowsingContextId, PipelineId};
use servo_url::ServoUrl;
use crate::fetch::methods::FetchContext;
/// Build the devtools `HttpRequestUpdate` network event describing a request
/// that is being transmitted over the network.
///
/// The returned message carries the full request (URL, method, headers,
/// optional body bytes) together with connect/send timings, wrapped for
/// delivery to the devtools server.
#[allow(clippy::too_many_arguments)]
pub(crate) fn prepare_devtools_request(
    request_id: String,
    url: ServoUrl,
    method: Method,
    headers: HeaderMap,
    body: Option<Vec<u8>>,
    pipeline_id: PipelineId,
    connect_time: Duration,
    send_time: Duration,
    destination: Destination,
    is_xhr: bool,
    browsing_context_id: BrowsingContextId,
) -> ChromeToDevtoolsControlMsg {
    let now = SystemTime::now();
    // Seconds since the Unix epoch; defaults to zero if the system clock
    // reports a time before the epoch.
    let time_stamp = now
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs() as i64;
    let devtools_request = DevtoolsHttpRequest {
        url,
        method,
        headers,
        body: body.map(DebugVec::from),
        pipeline_id,
        started_date_time: now,
        time_stamp,
        connect_time,
        send_time,
        destination,
        is_xhr,
        browsing_context_id,
    };
    ChromeToDevtoolsControlMsg::NetworkEvent(
        request_id,
        NetworkEvent::HttpRequestUpdate(devtools_request),
    )
}
pub(crate) fn send_request_to_devtools(
msg: ChromeToDevtoolsControlMsg,
devtools_chan: &Sender<DevtoolsControlMsg>,
) {
if matches!(msg, ChromeToDevtoolsControlMsg::NetworkEvent(_, ref network_event) if !network_event.forward_to_devtools())
{
return;
}
if let Err(e) = devtools_chan.send(DevtoolsControlMsg::FromChrome(msg)) {
error!("DevTools send failed: {e}");
}
}
/// Report a completed response (headers, status, optional body bytes) to
/// devtools.
///
/// Filtered responses are reported through their unfiltered metadata. If the
/// response has no usable metadata this is a no-op apart from a warning.
pub(crate) fn send_response_to_devtools(
    request: &Request,
    context: &FetchContext,
    response: &Response,
    body_data: Option<Vec<u8>>,
) {
    let metadata = match response.metadata() {
        Ok(FetchMetadata::Unfiltered(unfiltered)) => unfiltered,
        Ok(FetchMetadata::Filtered { unsafe_, .. }) => unsafe_,
        Err(_) => {
            log::warn!("No metadata available, skipping devtools response.");
            return;
        },
    };
    send_response_values_to_devtools(
        metadata.headers.map(Serde::into_inner),
        metadata.status,
        body_data,
        response.cache_state,
        request,
        context.devtools_chan.clone(),
    );
}
/// Package response values into a devtools `HttpResponse` network event and
/// send it on `devtools_chan`.
///
/// A message is only emitted when a channel, a pipeline id and a target
/// webview id are all present; otherwise this is a no-op.
#[allow(clippy::too_many_arguments)]
pub(crate) fn send_response_values_to_devtools(
    headers: Option<HeaderMap>,
    status: HttpStatus,
    body: Option<Vec<u8>>,
    cache_state: CacheState,
    request: &Request,
    devtools_chan: Option<Sender<DevtoolsControlMsg>>,
) {
    let Some(devtools_chan) = devtools_chan else {
        return;
    };
    let Some(pipeline_id) = request.pipeline_id else {
        return;
    };
    let Some(webview_id) = request.target_webview_id else {
        return;
    };
    // A response served from the local cache, or revalidated against it,
    // counts as "from cache" for devtools purposes.
    let from_cache = matches!(cache_state, CacheState::Local | CacheState::Validated);
    let devtools_response = DevtoolsHttpResponse {
        headers,
        status,
        body: body.map(DebugVec::from),
        from_cache,
        pipeline_id,
        browsing_context_id: webview_id.into(),
    };
    let msg = ChromeToDevtoolsControlMsg::NetworkEvent(
        request.id.0.to_string(),
        NetworkEvent::HttpResponse(devtools_response),
    );
    let _ = devtools_chan.send(DevtoolsControlMsg::FromChrome(msg));
}
/// Forward the TLS security information attached to a response's metadata to
/// devtools.
///
/// No-op (apart from a warning) when the response has no metadata, and a
/// silent no-op when there is no devtools channel, no security info, or no
/// target webview.
pub(crate) fn send_security_info_to_devtools(
    request: &Request,
    context: &FetchContext,
    response: &Response,
) {
    let metadata = match response.metadata() {
        Ok(FetchMetadata::Unfiltered(unfiltered)) => unfiltered,
        Ok(FetchMetadata::Filtered { unsafe_, .. }) => unsafe_,
        Err(_) => {
            log::warn!("No metadata available, skipping devtools security info.");
            return;
        },
    };
    let Some(devtools_chan) = context.devtools_chan.clone() else {
        return;
    };
    let Some(security_info) = metadata.tls_security_info else {
        return;
    };
    let Some(webview_id) = request.target_webview_id else {
        return;
    };
    let update = NetworkEvent::SecurityInfo(SecurityInfoUpdate {
        browsing_context_id: webview_id.into(),
        security_info: Some(security_info),
    });
    let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request.id.0.to_string(), update);
    let _ = devtools_chan.send(DevtoolsControlMsg::FromChrome(msg));
}
/// Announce a request to devtools before any network activity has happened.
///
/// Sends a partial `DevtoolsHttpRequest`: the body is `None` and all timing
/// fields are zeroed, since they are not known yet. Requests with the `data:`
/// scheme are never reported.
pub(crate) fn send_early_httprequest_to_devtools(request: &Request, context: &FetchContext) {
    // Do not forward data requests to devtools.
    if request.url().scheme() == "data" {
        return;
    }
    let Some(devtools_chan) = context.devtools_chan.as_ref() else {
        return;
    };
    let Some(browsing_context_id) = request.target_webview_id.map(|id| id.into()) else {
        return;
    };
    let Some(pipeline_id) = request.pipeline_id else {
        return;
    };
    // Build the partial DevtoolsHttpRequest: no body, placeholder timings.
    let devtools_request = DevtoolsHttpRequest {
        url: request.current_url(),
        method: request.method.clone(),
        headers: request.headers.clone(),
        body: None,
        pipeline_id,
        started_date_time: SystemTime::now(),
        time_stamp: 0,
        connect_time: Duration::from_millis(0),
        send_time: Duration::from_millis(0),
        destination: request.destination,
        is_xhr: false,
        browsing_context_id,
    };
    let msg = ChromeToDevtoolsControlMsg::NetworkEvent(
        request.id.0.to_string(),
        NetworkEvent::HttpRequest(devtools_request),
    );
    send_request_to_devtools(msg, devtools_chan);
}

View File

@@ -47,15 +47,15 @@ use tokio::sync::Mutex as TokioMutex;
use tokio::sync::mpsc::{UnboundedReceiver as TokioReceiver, UnboundedSender as TokioSender};
use crate::connector::CACertificates;
use crate::devtools::{
send_early_httprequest_to_devtools, send_response_to_devtools, send_security_info_to_devtools,
};
use crate::fetch::cors_cache::CorsCache;
use crate::fetch::fetch_params::{
ConsumePreloadedResources, FetchParams, SharedPreloadedResources,
};
use crate::filemanager_thread::FileManager;
use crate::http_loader::{
HttpState, determine_requests_referrer, http_fetch, send_early_httprequest_to_devtools,
send_response_to_devtools, send_security_info_to_devtools, set_default_accept,
};
use crate::http_loader::{HttpState, determine_requests_referrer, http_fetch, set_default_accept};
use crate::protocols::{ProtocolRegistry, is_url_potentially_trustworthy};
use crate::request_interceptor::RequestInterceptor;
use crate::subresource_integrity::is_response_integrity_valid;

View File

@@ -5,14 +5,11 @@
use std::collections::HashSet;
use std::iter::FromIterator;
use std::sync::Arc as StdArc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use std::time::{Duration, SystemTime};
use async_recursion::async_recursion;
use crossbeam_channel::Sender;
use devtools_traits::{
ChromeToDevtoolsControlMsg, DevtoolsControlMsg, HttpRequest as DevtoolsHttpRequest,
HttpResponse as DevtoolsHttpResponse, NetworkEvent, SecurityInfoUpdate,
};
use content_security_policy::percent_encoding::{AsciiSet, CONTROLS, utf8_percent_encode};
use devtools_traits::ChromeToDevtoolsControlMsg;
use embedder_traits::{AuthenticationResponse, GenericEmbedderProxy};
use futures::{TryFutureExt, TryStreamExt, future};
use headers::authorization::Basic;
@@ -34,7 +31,6 @@ use hyper::Response as HyperResponse;
use hyper::body::{Bytes, Frame};
use hyper::ext::ReasonPhrase;
use hyper::header::{HeaderName, TRANSFER_ENCODING};
use hyper_serde::Serde;
use ipc_channel::IpcError;
use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
@@ -57,13 +53,15 @@ use net_traits::response::{
CacheState, HttpsState, RedirectTaint, Response, ResponseBody, ResponseType,
};
use net_traits::{
CookieSource, DOCUMENT_ACCEPT_HEADER_VALUE, DebugVec, FetchMetadata, NetworkError,
RedirectEndValue, RedirectStartValue, ReferrerPolicy, ResourceAttribute, ResourceFetchTiming,
ResourceTimeValue, TlsSecurityInfo, TlsSecurityState,
CookieSource, DOCUMENT_ACCEPT_HEADER_VALUE, NetworkError, RedirectEndValue, RedirectStartValue,
ReferrerPolicy, ResourceAttribute, ResourceFetchTiming, ResourceTimeValue, TlsSecurityInfo,
TlsSecurityState,
};
use parking_lot::{Mutex, RwLock};
use profile_traits::mem::{Report, ReportKind};
use profile_traits::path;
#[cfg(feature = "tracing")]
use profile_traits::trace_span;
use rustc_hash::FxHashMap;
use servo_arc::Arc;
use servo_base::cross_process_instant::CrossProcessInstant;
@@ -75,6 +73,8 @@ use tokio::sync::mpsc::{
unbounded_channel,
};
use tokio_stream::wrappers::ReceiverStream;
#[cfg(feature = "tracing")]
use tracing::Instrument;
use crate::async_runtime::spawn_task;
use crate::connector::{
@@ -83,6 +83,9 @@ use crate::connector::{
use crate::cookie::ServoCookie;
use crate::cookie_storage::CookieStorage;
use crate::decoder::Decoder;
use crate::devtools::{
prepare_devtools_request, send_request_to_devtools, send_response_values_to_devtools,
};
use crate::embedder::NetToEmbedderMsg;
use crate::fetch::cors_cache::CorsCache;
use crate::fetch::fetch_params::FetchParams;
@@ -405,179 +408,6 @@ fn build_tls_security_info(handshake: &TlsHandshakeInfo, hsts_enabled: bool) ->
}
}
#[allow(clippy::too_many_arguments)]
fn prepare_devtools_request(
request_id: String,
url: ServoUrl,
method: Method,
headers: HeaderMap,
body: Option<Vec<u8>>,
pipeline_id: PipelineId,
connect_time: Duration,
send_time: Duration,
destination: Destination,
is_xhr: bool,
browsing_context_id: BrowsingContextId,
) -> ChromeToDevtoolsControlMsg {
let started_date_time = SystemTime::now();
let request = DevtoolsHttpRequest {
url,
method,
headers,
body: body.map(DebugVec::from),
pipeline_id,
started_date_time,
time_stamp: started_date_time
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as i64,
connect_time,
send_time,
destination,
is_xhr,
browsing_context_id,
};
let net_event = NetworkEvent::HttpRequestUpdate(request);
ChromeToDevtoolsControlMsg::NetworkEvent(request_id, net_event)
}
pub fn send_request_to_devtools(
msg: ChromeToDevtoolsControlMsg,
devtools_chan: &Sender<DevtoolsControlMsg>,
) {
if matches!(msg, ChromeToDevtoolsControlMsg::NetworkEvent(_, ref network_event) if !network_event.forward_to_devtools())
{
return;
}
if let Err(e) = devtools_chan.send(DevtoolsControlMsg::FromChrome(msg)) {
error!("DevTools send failed: {e}");
}
}
pub fn send_response_to_devtools(
request: &Request,
context: &FetchContext,
response: &Response,
body_data: Option<Vec<u8>>,
) {
let meta = match response.metadata() {
Ok(FetchMetadata::Unfiltered(m)) => m,
Ok(FetchMetadata::Filtered { unsafe_, .. }) => unsafe_,
Err(_) => {
log::warn!("No metadata available, skipping devtools response.");
return;
},
};
send_response_values_to_devtools(
meta.headers.map(Serde::into_inner),
meta.status,
body_data,
response.cache_state,
request,
context.devtools_chan.clone(),
);
}
#[allow(clippy::too_many_arguments)]
pub fn send_response_values_to_devtools(
headers: Option<HeaderMap>,
status: HttpStatus,
body: Option<Vec<u8>>,
cache_state: CacheState,
request: &Request,
devtools_chan: Option<Sender<DevtoolsControlMsg>>,
) {
if let (Some(devtools_chan), Some(pipeline_id), Some(webview_id)) = (
devtools_chan,
request.pipeline_id,
request.target_webview_id,
) {
let browsing_context_id = webview_id.into();
let from_cache = matches!(cache_state, CacheState::Local | CacheState::Validated);
let devtoolsresponse = DevtoolsHttpResponse {
headers,
status,
body: body.map(DebugVec::from),
from_cache,
pipeline_id,
browsing_context_id,
};
let net_event_response = NetworkEvent::HttpResponse(devtoolsresponse);
let msg =
ChromeToDevtoolsControlMsg::NetworkEvent(request.id.0.to_string(), net_event_response);
let _ = devtools_chan.send(DevtoolsControlMsg::FromChrome(msg));
}
}
pub fn send_security_info_to_devtools(
request: &Request,
context: &FetchContext,
response: &Response,
) {
let meta = match response.metadata() {
Ok(FetchMetadata::Unfiltered(m)) => m,
Ok(FetchMetadata::Filtered { unsafe_, .. }) => unsafe_,
Err(_) => {
log::warn!("No metadata available, skipping devtools security info.");
return;
},
};
if let (Some(devtools_chan), Some(security_info), Some(webview_id)) = (
context.devtools_chan.clone(),
meta.tls_security_info,
request.target_webview_id,
) {
let update = NetworkEvent::SecurityInfo(SecurityInfoUpdate {
browsing_context_id: webview_id.into(),
security_info: Some(security_info),
});
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request.id.0.to_string(), update);
let _ = devtools_chan.send(DevtoolsControlMsg::FromChrome(msg));
}
}
pub fn send_early_httprequest_to_devtools(request: &Request, context: &FetchContext) {
// Do not forward data requests to devtools
if request.url().scheme() == "data" {
return;
}
if let (Some(devtools_chan), Some(browsing_context_id), Some(pipeline_id)) = (
context.devtools_chan.as_ref(),
request.target_webview_id.map(|id| id.into()),
request.pipeline_id,
) {
// Build the partial DevtoolsHttpRequest
let devtools_request = DevtoolsHttpRequest {
url: request.current_url(),
method: request.method.clone(),
headers: request.headers.clone(),
body: None,
pipeline_id,
started_date_time: SystemTime::now(),
time_stamp: 0,
connect_time: Duration::from_millis(0),
send_time: Duration::from_millis(0),
destination: request.destination,
is_xhr: false,
browsing_context_id,
};
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(
request.id.0.to_string(),
NetworkEvent::HttpRequest(devtools_request),
);
send_request_to_devtools(msg, devtools_chan);
}
}
fn auth_from_cache(
auth_cache: &RwLock<AuthCache>,
origin: &ImmutableOrigin,
@@ -669,13 +499,17 @@ fn log_fetch_terminated_send_failure(terminated_with_error: bool, context: &str)
);
}
const FRAGMENT: &AsciiSet = &CONTROLS.add(b'|').add(b'{').add(b'}');
#[allow(clippy::too_many_arguments)]
#[servo_tracing::instrument(skip_all, fields(url))]
/// This sets up the callback infrastructure to send body frames to `body_sender` and fires the client request.
async fn obtain_response(
client: &ServoClient,
url: &ServoUrl,
method: &Method,
request_headers: &mut HeaderMap,
body: Option<StdArc<Mutex<Option<IpcSender<BodyChunkRequest>>>>>,
body_sender: Option<StdArc<Mutex<Option<IpcSender<BodyChunkRequest>>>>>,
source_is_null: bool,
pipeline_id: &Option<PipelineId>,
request_id: Option<&str>,
@@ -685,281 +519,293 @@ async fn obtain_response(
fetch_terminated: UnboundedSender<bool>,
browsing_context_id: Option<BrowsingContextId>,
) -> Result<(HyperResponse<Decoder>, Option<ChromeToDevtoolsControlMsg>), NetworkError> {
{
let mut headers = request_headers.clone();
let mut headers = request_headers.clone();
let devtools_bytes = StdArc::new(Mutex::new(vec![]));
let devtools_bytes = StdArc::new(Mutex::new(vec![]));
// https://url.spec.whatwg.org/#percent-encoded-bytes
let encoded_url = url
.clone()
.into_url()
.as_ref()
.replace('|', "%7C")
.replace('{', "%7B")
.replace('}', "%7D");
// https://url.spec.whatwg.org/#percent-encoded-bytes
let encoded_url = utf8_percent_encode(url.as_str(), FRAGMENT).to_string();
let request = if let Some(chunk_requester) = body {
let (sink, stream) = if source_is_null {
// Step 4.2 of https://fetch.spec.whatwg.org/#concept-http-network-fetch
// TODO: this should not be set for HTTP/2(currently not supported?).
headers.insert(TRANSFER_ENCODING, HeaderValue::from_static("chunked"));
let request = if let Some(chunk_requester) = body_sender {
let (sink, stream) = if source_is_null {
// Step 4.2 of https://fetch.spec.whatwg.org/#concept-http-network-fetch
// TODO: this should not be set for HTTP/2(currently not supported?).
headers.insert(TRANSFER_ENCODING, HeaderValue::from_static("chunked"));
let (sender, receiver) = channel(1);
(BodySink::Chunked(sender), BodyStream::Chunked(receiver))
} else {
// Note: Hyper seems to already buffer bytes when the request appears not stream-able,
// see https://github.com/hyperium/hyper/issues/2232#issuecomment-644322104
//
// However since this doesn't appear documented, and we're using an ancient version,
// for now we buffer manually to ensure we don't stream requests
// to servers that might not know how to handle them.
let (sender, receiver) = unbounded_channel();
(BodySink::Buffered(sender), BodyStream::Buffered(receiver))
};
let (body_chan, body_port) = ipc::channel().unwrap();
{
let mut lock = chunk_requester.lock();
if let Some(chunk_requester) = lock.as_mut() {
if let Err(error) = chunk_requester.send(BodyChunkRequest::Connect(body_chan)) {
log_request_body_stream_closed(
"connect to the request body stream",
Some(&error),
);
return Err(request_body_stream_closed_error(
"connect to the request body stream",
));
}
// https://fetch.spec.whatwg.org/#concept-request-transmit-body
// Request the first chunk, corresponding to Step 3 and 4.
if let Err(error) = chunk_requester.send(BodyChunkRequest::Chunk) {
log_request_body_stream_closed(
"request the first request body chunk",
Some(&error),
);
return Err(request_body_stream_closed_error(
"request the first request body chunk",
));
}
} else {
log_request_body_stream_closed("connect to the request body stream", None);
return Err(request_body_stream_closed_error(
"connect to the request body stream",
));
}
}
let devtools_bytes = devtools_bytes.clone();
let chunk_requester2 = chunk_requester.clone();
ROUTER.add_typed_route(
body_port,
Box::new(move |message| {
info!("Received message");
let bytes = match message.unwrap() {
BodyChunkResponse::Chunk(bytes) => bytes,
BodyChunkResponse::Done => {
// Step 3, abort these parallel steps.
if fetch_terminated.send(false).is_err() {
log_fetch_terminated_send_failure(
false,
"handling request body completion",
);
}
sink.close();
return;
},
BodyChunkResponse::Error => {
// Step 4 and/or 5.
// TODO: differentiate between the two steps,
// where step 5 requires setting an `aborted` flag on the fetch.
if fetch_terminated.send(true).is_err() {
log_fetch_terminated_send_failure(
true,
"handling request body stream error",
);
}
sink.close();
return;
},
};
devtools_bytes.lock().extend_from_slice(&bytes);
// Step 5.1.2.2, transmit chunk over the network,
// currently implemented by sending the bytes to the fetch worker.
sink.transmit_bytes(bytes);
// Step 5.1.2.3
// Request the next chunk.
let mut chunk_requester2 = chunk_requester2.lock();
if let Some(chunk_requester2) = chunk_requester2.as_mut() {
if let Err(error) = chunk_requester2.send(BodyChunkRequest::Chunk) {
log_request_body_stream_closed(
"request the next request body chunk",
Some(&error),
);
if fetch_terminated.send(true).is_err() {
log_fetch_terminated_send_failure(
true,
"handling failure to request the next request body chunk",
);
}
sink.close();
}
} else {
log_request_body_stream_closed("request the next request body chunk", None);
if fetch_terminated.send(true).is_err() {
log_fetch_terminated_send_failure(
true,
"handling a closed request body stream while requesting the next chunk",
);
}
sink.close();
}
}),
);
let body = match stream {
BodyStream::Chunked(receiver) => {
let stream = ReceiverStream::new(receiver);
BoxBody::new(http_body_util::StreamBody::new(stream))
},
BodyStream::Buffered(mut receiver) => {
// Accumulate bytes received over IPC into a vector.
let mut body = vec![];
loop {
match receiver.recv().await {
Some(BodyChunk::Chunk(bytes)) => {
body.extend_from_slice(&bytes);
},
Some(BodyChunk::Done) => break,
None => warn!("Failed to read all chunks from request body."),
}
}
Full::new(body.into()).map_err(|_| unreachable!()).boxed()
},
};
HyperRequest::builder()
.method(method)
.uri(encoded_url)
.body(body)
let (sender, receiver) = channel(1);
(BodySink::Chunked(sender), BodyStream::Chunked(receiver))
} else {
HyperRequest::builder()
.method(method)
.uri(encoded_url)
.body(
http_body_util::Empty::new()
.map_err(|_| unreachable!())
.boxed(),
)
// Note: Hyper seems to already buffer bytes when the request appears not stream-able,
// see https://github.com/hyperium/hyper/issues/2232#issuecomment-644322104
//
// However since this doesn't appear documented, and we're using an ancient version,
// for now we buffer manually to ensure we don't stream requests
// to servers that might not know how to handle them.
let (sender, receiver) = unbounded_channel();
(BodySink::Buffered(sender), BodyStream::Buffered(receiver))
};
context
.timing
.lock()
.set_attribute(ResourceAttribute::DomainLookupStart);
obtain_response_setup_router_callback(
devtools_bytes.clone(),
chunk_requester,
sink,
fetch_terminated,
)?;
// TODO(#21261) connect_start: set if a persistent connection is *not* used and the last non-redirected
// fetch passes the timing allow check
let connect_start = CrossProcessInstant::now();
context
.timing
.lock()
.set_attribute(ResourceAttribute::ConnectStart(connect_start));
// TODO: We currently don't know when the handshake before the connection is done
// so our best bet would be to set `secure_connection_start` here when we are currently
// fetching on a HTTPS url.
if url.scheme() == "https" {
context
.timing
.lock()
.set_attribute(ResourceAttribute::SecureConnectionStart);
}
let mut request = match request {
Ok(request) => request,
Err(error) => return Err(NetworkError::HttpError(error.to_string())),
let body = match stream {
BodyStream::Chunked(receiver) => {
let stream = ReceiverStream::new(receiver);
BoxBody::new(http_body_util::StreamBody::new(stream))
},
BodyStream::Buffered(mut receiver) => {
// Accumulate bytes received over IPC into a vector.
let mut body = vec![];
loop {
match receiver.recv().await {
Some(BodyChunk::Chunk(bytes)) => {
body.extend_from_slice(&bytes);
},
Some(BodyChunk::Done) => break,
None => warn!("Failed to read all chunks from request body."),
}
}
Full::new(body.into()).map_err(|_| unreachable!()).boxed()
},
};
*request.headers_mut() = headers.clone();
HyperRequest::builder()
.method(method)
.uri(encoded_url)
.body(body)
} else {
HyperRequest::builder()
.method(method)
.uri(encoded_url)
.body(
http_body_util::Empty::new()
.map_err(|_| unreachable!())
.boxed(),
)
};
let connect_end = CrossProcessInstant::now();
context
.timing
.lock()
.set_attribute(ResourceAttribute::DomainLookupStart);
// TODO(#21261) connect_start: set if a persistent connection is *not* used and the last non-redirected
// fetch passes the timing allow check
let connect_start = CrossProcessInstant::now();
context
.timing
.lock()
.set_attribute(ResourceAttribute::ConnectStart(connect_start));
// TODO: We currently don't know when the handshake before the connection is done
// so our best bet would be to set `secure_connection_start` here when we are currently
// fetching on a HTTPS url.
if url.scheme() == "https" {
context
.timing
.lock()
.set_attribute(ResourceAttribute::ConnectEnd(connect_end));
.set_attribute(ResourceAttribute::SecureConnectionStart);
}
let request_id = request_id.map(|v| v.to_owned());
let pipeline_id = *pipeline_id;
let closure_url = url.clone();
let method = method.clone();
let send_start = CrossProcessInstant::now();
let mut request = match request {
Ok(request) => request,
Err(error) => return Err(NetworkError::HttpError(error.to_string())),
};
*request.headers_mut() = headers.clone();
let host = request.uri().host().unwrap_or("").to_owned();
let override_manager = context.state.override_manager.clone();
let headers = headers.clone();
let is_secure_scheme = url.is_secure_scheme();
let connect_end = CrossProcessInstant::now();
context
.timing
.lock()
.set_attribute(ResourceAttribute::ConnectEnd(connect_end));
client
.request(request)
.and_then(move |res| {
let send_end = CrossProcessInstant::now();
let request_id = request_id.map(|v| v.to_owned());
let pipeline_id = *pipeline_id;
let closure_url = url.clone();
let method = method.clone();
let send_start = CrossProcessInstant::now();
// TODO(#21271) response_start: immediately after receiving first byte of response
let host = request.uri().host().unwrap_or("").to_owned();
let override_manager = context.state.override_manager.clone();
let headers = headers.clone();
let is_secure_scheme = url.is_secure_scheme();
let msg = if let Some(request_id) = request_id {
if let Some(pipeline_id) = pipeline_id {
if let Some(browsing_context_id) = browsing_context_id {
Some(prepare_devtools_request(
request_id,
closure_url,
method.clone(),
headers,
Some(devtools_bytes.lock().clone()),
pipeline_id,
(connect_end - connect_start).unsigned_abs(),
(send_end - send_start).unsigned_abs(),
destination,
is_xhr,
browsing_context_id,
))
} else {
debug!("Not notifying devtools (no browsing_context_id)");
None
}
// TODO: ^This is not right, connect_start is taken before constructing the
// request and connect_end at the end of it. send_start is taken before the
// connection too. I'm not sure it's currently possible to get the time at the
// point between the connection and the start of a request.
let client_future = client
.request(request)
.and_then(move |res| {
let send_end = CrossProcessInstant::now();
// TODO(#21271) response_start: immediately after receiving first byte of response
let msg = if let Some(request_id) = request_id {
if let Some(pipeline_id) = pipeline_id {
if let Some(browsing_context_id) = browsing_context_id {
Some(prepare_devtools_request(
request_id,
closure_url,
method.clone(),
headers,
Some(devtools_bytes.lock().clone()),
pipeline_id,
(connect_end - connect_start).unsigned_abs(),
(send_end - send_start).unsigned_abs(),
destination,
is_xhr,
browsing_context_id,
))
} else {
debug!("Not notifying devtools (no pipeline_id)");
debug!("Not notifying devtools (no browsing_context_id)");
None
}
// TODO: ^This is not right, connect_start is taken before constructing the
// request and connect_end at the end of it. send_start is taken before the
// connection too. I'm not sure it's currently possible to get the time at the
// point between the connection and the start of a request.
} else {
debug!("Not notifying devtools (no request_id)");
debug!("Not notifying devtools (no pipeline_id)");
None
};
}
} else {
debug!("Not notifying devtools (no request_id)");
None
};
future::ready(Ok((
Decoder::detect(res.map(|r| r.boxed()), is_secure_scheme),
msg,
)))
})
.map_err(move |error| {
warn!("network error: {error:?}");
NetworkError::from_hyper_error(
&error,
override_manager.remove_certificate_failing_verification(host.as_str()),
)
})
.await
future::ready(Ok((
Decoder::detect(res.map(|r| r.boxed()), is_secure_scheme),
msg,
)))
})
.map_err(move |error| {
warn!("network error: {error:?}");
NetworkError::from_hyper_error(
&error,
override_manager.remove_certificate_failing_verification(host.as_str()),
)
});
#[cfg(feature = "tracing")]
{
client_future.instrument(trace_span!("HyperRequest")).await
}
#[cfg(not(feature = "tracing"))]
{
client_future.await
}
}
/// Setup the callback mechanism to forward chunks from the request received to the `chunk_requester`.
///
/// Connects an IPC channel to the body stream behind `chunk_requester`, requests the
/// first chunk, and registers a ROUTER callback that, for each incoming message:
/// - appends received chunk bytes to `devtools_bytes` so devtools can later show the body,
/// - forwards the bytes into `sink` for transmission,
/// - requests the next chunk,
/// - on stream completion or error, signals `fetch_terminated`
///   (`false` = normal completion, `true` = error) and closes the sink.
///
/// Returns an error if the body stream is already closed when connecting or when
/// requesting the first chunk.
fn obtain_response_setup_router_callback(
    devtools_bytes: StdArc<Mutex<Vec<u8>>>,
    chunk_requester: StdArc<Mutex<Option<IpcSender<BodyChunkRequest>>>>,
    sink: BodySink,
    fetch_terminated: UnboundedSender<bool>,
) -> Result<(), NetworkError> {
    let (body_chan, body_port) = ipc::channel().unwrap();
    // Connect to the body stream and request the first chunk before the
    // router callback is installed; failures here are surfaced to the caller.
    {
        let mut lock = chunk_requester.lock();
        if let Some(chunk_requester) = lock.as_mut() {
            if let Err(error) = chunk_requester.send(BodyChunkRequest::Connect(body_chan)) {
                log_request_body_stream_closed("connect to the request body stream", Some(&error));
                return Err(request_body_stream_closed_error(
                    "connect to the request body stream",
                ));
            }
            // https://fetch.spec.whatwg.org/#concept-request-transmit-body
            // Request the first chunk, corresponding to Step 3 and 4.
            if let Err(error) = chunk_requester.send(BodyChunkRequest::Chunk) {
                log_request_body_stream_closed(
                    "request the first request body chunk",
                    Some(&error),
                );
                return Err(request_body_stream_closed_error(
                    "request the first request body chunk",
                ));
            }
        } else {
            log_request_body_stream_closed("connect to the request body stream", None);
            return Err(request_body_stream_closed_error(
                "connect to the request body stream",
            ));
        }
    }
    // From here on, chunk delivery is driven by the router callback below.
    ROUTER.add_typed_route(
        body_port,
        Box::new(move |message| {
            info!("Received message");
            let bytes = match message.unwrap() {
                BodyChunkResponse::Chunk(bytes) => bytes,
                BodyChunkResponse::Done => {
                    // Step 3, abort these parallel steps.
                    if fetch_terminated.send(false).is_err() {
                        log_fetch_terminated_send_failure(
                            false,
                            "handling request body completion",
                        );
                    }
                    sink.close();
                    return;
                },
                BodyChunkResponse::Error => {
                    // Step 4 and/or 5.
                    // TODO: differentiate between the two steps,
                    // where step 5 requires setting an `aborted` flag on the fetch.
                    if fetch_terminated.send(true).is_err() {
                        log_fetch_terminated_send_failure(
                            true,
                            "handling request body stream error",
                        );
                    }
                    sink.close();
                    return;
                },
            };
            // Keep a copy of every transmitted byte for devtools reporting.
            devtools_bytes.lock().extend_from_slice(&bytes);
            // Step 5.1.2.2, transmit chunk over the network,
            // currently implemented by sending the bytes to the fetch worker.
            sink.transmit_bytes(bytes);
            // Step 5.1.2.3
            // Request the next chunk.
            let mut chunk_requester = chunk_requester.lock();
            if let Some(chunk_requester) = chunk_requester.as_mut() {
                if let Err(error) = chunk_requester.send(BodyChunkRequest::Chunk) {
                    log_request_body_stream_closed(
                        "request the next request body chunk",
                        Some(&error),
                    );
                    if fetch_terminated.send(true).is_err() {
                        log_fetch_terminated_send_failure(
                            true,
                            "handling failure to request the next request body chunk",
                        );
                    }
                    sink.close();
                }
            } else {
                log_request_body_stream_closed("request the next request body chunk", None);
                if fetch_terminated.send(true).is_err() {
                    log_fetch_terminated_send_failure(
                        true,
                        "handling a closed request body stream while requesting the next chunk",
                    );
                }
                sink.close();
            }
        }),
    );
    Ok(())
}
/// [HTTP fetch](https://fetch.spec.whatwg.org/#concept-http-fetch)
@@ -1407,6 +1253,7 @@ pub async fn http_redirect_fetch(
/// [HTTP network or cache fetch](https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch)
#[async_recursion]
#[servo_tracing::instrument(skip_all,field(url=fetch_params.request.url()))]
async fn http_network_or_cache_fetch(
fetch_params: &mut FetchParams,
authentication_fetch_flag: bool,
@@ -1577,48 +1424,7 @@ async fn http_network_or_cache_fetch(
}
// Steps 8.16 to 8.18
match http_request.cache_mode {
// Step 8.16: If httpRequests cache mode is "default" and httpRequests header list
// contains `If-Modified-Since`, `If-None-Match`, `If-Unmodified-Since`, `If-Match`, or
// `If-Range`, then set httpRequests cache mode to "no-store".
CacheMode::Default if is_no_store_cache(&http_request.headers) => {
http_request.cache_mode = CacheMode::NoStore;
},
// Note that the following steps (8.17 and 8.18) are being considered for removal:
// https://github.com/whatwg/fetch/issues/722#issuecomment-1420264615
// Step 8.17: If httpRequests cache mode is "no-cache", httpRequests prevent no-cache
// cache-control header modification flag is unset, and httpRequests header list does not
// contain `Cache-Control`, then append (`Cache-Control`, `max-age=0`) to httpRequests
// header list.
// TODO: Implement request's prevent no-cache cache-control header modification flag
// https://fetch.spec.whatwg.org/#no-cache-prevent-cache-control
CacheMode::NoCache if !http_request.headers.contains_key(header::CACHE_CONTROL) => {
http_request
.headers
.typed_insert(CacheControl::new().with_max_age(Duration::from_secs(0)));
},
// Step 8.18: If httpRequests cache mode is "no-store" or "reload", then:
CacheMode::Reload | CacheMode::NoStore => {
// Step 8.18.1: If httpRequests header list does not contain `Pragma`, then append
// (`Pragma`, `no-cache`) to httpRequests header list.
if !http_request.headers.contains_key(header::PRAGMA) {
http_request.headers.typed_insert(Pragma::no_cache());
}
// Step 8.18.2: If httpRequests header list does not contain `Cache-Control`, then
// append (`Cache-Control`, `no-cache`) to httpRequests header list.
if !http_request.headers.contains_key(header::CACHE_CONTROL) {
http_request
.headers
.typed_insert(CacheControl::new().with_no_cache());
}
},
_ => {},
}
append_cache_data_to_headers(http_request);
// Step 8.19: If httpRequests header list contains `Range`, then append (`Accept-Encoding`,
// `identity`) to httpRequests header list.
@@ -1914,6 +1720,7 @@ async fn http_network_or_cache_fetch(
// Step 18. Return response.
response
}
/// If the cache is not ready to construct a response, wait.
///
/// The cache is not ready if a previous fetch checked the cache, found nothing,
@@ -1921,7 +1728,7 @@ async fn http_network_or_cache_fetch(
///
/// Note that this is a different workflow from the one involving `wait_for_cached_response`.
/// That one happens when a fetch gets a cache hit, and the resource is pending completion from the network.
///
#[servo_tracing::instrument(skip_all)]
async fn block_for_cache_ready<'a>(
context: &'a FetchContext,
http_request: &mut Request,
@@ -2102,6 +1909,7 @@ impl Drop for ResponseEndTimer {
}
/// [HTTP network fetch](https://fetch.spec.whatwg.org/#http-network-fetch)
#[servo_tracing::instrument(skip_all,fields(url=fetch_params.request.url().as_str()))]
async fn http_network_fetch(
fetch_params: &mut FetchParams,
credentials_flag: bool,
@@ -2826,6 +2634,52 @@ fn append_the_fetch_metadata_headers(r: &mut Request) {
set_the_sec_fetch_user_header(r);
}
/// Steps 8.16 to 8.18 in [HTTP network or cache fetch](https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch)
///
/// Adjusts the request's cache mode and appends cache-related headers
/// (`Cache-Control`, `Pragma`) according to the request's current cache mode.
fn append_cache_data_to_headers(http_request: &mut Request) {
    match http_request.cache_mode {
        // Step 8.16: If httpRequest’s cache mode is "default" and httpRequest’s header list
        // contains `If-Modified-Since`, `If-None-Match`, `If-Unmodified-Since`, `If-Match`, or
        // `If-Range`, then set httpRequest’s cache mode to "no-store".
        CacheMode::Default => {
            if is_no_store_cache(&http_request.headers) {
                http_request.cache_mode = CacheMode::NoStore;
            }
        },
        // Note that the following steps (8.17 and 8.18) are being considered for removal:
        // https://github.com/whatwg/fetch/issues/722#issuecomment-1420264615
        // Step 8.17: If httpRequest’s cache mode is "no-cache", httpRequest’s prevent no-cache
        // cache-control header modification flag is unset, and httpRequest’s header list does not
        // contain `Cache-Control`, then append (`Cache-Control`, `max-age=0`) to httpRequest’s
        // header list.
        // TODO: Implement request's prevent no-cache cache-control header modification flag
        // https://fetch.spec.whatwg.org/#no-cache-prevent-cache-control
        CacheMode::NoCache => {
            if !http_request.headers.contains_key(header::CACHE_CONTROL) {
                let max_age_zero = CacheControl::new().with_max_age(Duration::from_secs(0));
                http_request.headers.typed_insert(max_age_zero);
            }
        },
        // Step 8.18: If httpRequest’s cache mode is "no-store" or "reload", then:
        CacheMode::Reload | CacheMode::NoStore => {
            let headers = &mut http_request.headers;
            // Step 8.18.1: If httpRequest’s header list does not contain `Pragma`, then append
            // (`Pragma`, `no-cache`) to httpRequest’s header list.
            if !headers.contains_key(header::PRAGMA) {
                headers.typed_insert(Pragma::no_cache());
            }
            // Step 8.18.2: If httpRequest’s header list does not contain `Cache-Control`, then
            // append (`Cache-Control`, `no-cache`) to httpRequest’s header list.
            if !headers.contains_key(header::CACHE_CONTROL) {
                headers.typed_insert(CacheControl::new().with_no_cache());
            }
        },
        _ => {},
    }
}
/// <https://w3c.github.io/webappsec-fetch-metadata/#abstract-opdef-set-dest>
fn set_the_sec_fetch_dest_header(r: &mut Request) {
// Step 1. Assert: rs url is a potentially trustworthy URL.

View File

@@ -9,6 +9,7 @@ pub mod connector;
pub mod cookie;
pub mod cookie_storage;
mod decoder;
mod devtools;
pub mod embedder;
pub mod filemanager_thread;
mod hosts;

View File

@@ -57,6 +57,7 @@ tracing = [
"layout/tracing",
"profile_traits/tracing",
"script/tracing",
"net/tracing",
]
vello = ["servo-constellation/vello"]
webgl_backtrace = [