mirror of
https://github.com/servo/servo
synced 2026-04-25 17:15:48 +02:00
The origin of a `file` URL is unspecified. Engines act like they're opaque except in a few special cases - one of which is the "is potentially trustworthy" algorithm. This change allows consumers of `servo-url` to distinguish between regular opaque origins and file origins. Then we use that to mark file origins as "potentially trustworthy" which is what the spec wants. For now we can get away without changes to the `url` crate (the one used in the wider ecosystem, not just servo), but I'm unsure if that will be the case in the future. Testing: This change adds a test Fixes: https://github.com/servo/servo/issues/42540 --------- Signed-off-by: Simon Wülker <simon.wuelker@arcor.de>
1425 lines
55 KiB
Rust
1425 lines
55 KiB
Rust
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
|
||
|
||
use std::sync::Arc;
|
||
use std::sync::atomic::{AtomicBool, Ordering};
|
||
use std::{io, mem, str};
|
||
|
||
use base64::Engine as _;
|
||
use base64::engine::general_purpose;
|
||
use content_security_policy as csp;
|
||
use crossbeam_channel::Sender;
|
||
use devtools_traits::DevtoolsControlMsg;
|
||
use embedder_traits::resources::{self, Resource};
|
||
use headers::{AccessControlExposeHeaders, ContentType, HeaderMapExt};
|
||
use http::header::{self, HeaderMap, HeaderName, RANGE};
|
||
use http::{HeaderValue, Method, StatusCode};
|
||
use ipc_channel::ipc::{self, IpcSender};
|
||
use log::{debug, trace, warn};
|
||
use malloc_size_of_derive::MallocSizeOf;
|
||
use mime::{self, Mime};
|
||
use net_traits::fetch::headers::{determine_nosniff, extract_mime_type_as_mime};
|
||
use net_traits::filemanager_thread::{FileTokenCheck, RelativePos};
|
||
use net_traits::http_status::HttpStatus;
|
||
use net_traits::policy_container::{PolicyContainer, RequestPolicyContainer};
|
||
use net_traits::request::{
|
||
BodyChunkRequest, BodyChunkResponse, CredentialsMode, Destination, Initiator,
|
||
InsecureRequestsPolicy, Origin, ParserMetadata, RedirectMode, Referrer, Request, RequestBody,
|
||
RequestId, RequestMode, ResponseTainting, is_cors_safelisted_method,
|
||
is_cors_safelisted_request_header,
|
||
};
|
||
use net_traits::response::{Response, ResponseBody, ResponseType, TerminationReason};
|
||
use net_traits::{
|
||
FetchTaskTarget, NetworkError, ReferrerPolicy, ResourceAttribute, ResourceFetchTiming,
|
||
ResourceTimeValue, ResourceTimingType, WebSocketDomAction, WebSocketNetworkEvent,
|
||
set_default_accept_language,
|
||
};
|
||
use parking_lot::Mutex;
|
||
use rustc_hash::FxHashMap;
|
||
use rustls_pki_types::CertificateDer;
|
||
use serde::{Deserialize, Serialize};
|
||
use servo_arc::Arc as ServoArc;
|
||
use servo_base::generic_channel::CallbackSetter;
|
||
use servo_base::id::PipelineId;
|
||
use servo_url::{Host, ServoUrl};
|
||
use tokio::sync::Mutex as TokioMutex;
|
||
use tokio::sync::mpsc::{UnboundedReceiver as TokioReceiver, UnboundedSender as TokioSender};
|
||
|
||
use crate::connector::CACertificates;
|
||
use crate::fetch::cors_cache::CorsCache;
|
||
use crate::fetch::fetch_params::{
|
||
ConsumePreloadedResources, FetchParams, SharedPreloadedResources,
|
||
};
|
||
use crate::filemanager_thread::FileManager;
|
||
use crate::http_loader::{
|
||
HttpState, determine_requests_referrer, http_fetch, send_early_httprequest_to_devtools,
|
||
send_response_to_devtools, send_security_info_to_devtools, set_default_accept,
|
||
};
|
||
use crate::protocols::{ProtocolRegistry, is_url_potentially_trustworthy};
|
||
use crate::request_interceptor::RequestInterceptor;
|
||
use crate::subresource_integrity::is_response_integrity_valid;
|
||
|
||
/// Mutable reference to the consumer that receives fetch progress callbacks;
/// must be `Send` so it can cross task boundaries.
pub type Target<'a> = &'a mut (dyn FetchTaskTarget + Send);
|
||
|
||
/// A chunk of streamed response body data, or a terminal stream event.
#[derive(Clone, Deserialize, Serialize)]
pub enum Data {
    /// A chunk of response body bytes.
    Payload(Vec<u8>),
    /// The stream completed successfully.
    Done,
    /// The stream was cancelled before completing.
    Cancelled,
    /// The stream terminated with a network error.
    Error(NetworkError),
}
|
||
|
||
/// Channel endpoints for exchanging WebSocket events between the network side
/// and the DOM side of a WebSocket fetch.
pub struct WebSocketChannel {
    /// Sends network-side WebSocket events (to the DOM side).
    pub sender: IpcSender<WebSocketNetworkEvent>,
    /// Optional setter for the callback receiving DOM-side WebSocket actions.
    pub receiver: Option<CallbackSetter<WebSocketDomAction>>,
}

impl WebSocketChannel {
    /// Bundles the event sender with the (optional) DOM-action callback setter.
    pub fn new(
        sender: IpcSender<WebSocketNetworkEvent>,
        receiver: Option<CallbackSetter<WebSocketDomAction>>,
    ) -> Self {
        Self { sender, receiver }
    }
}
|
||
|
||
/// Used to keep track of keep-alive requests
/// (see step 15 of <https://fetch.spec.whatwg.org/#concept-fetch>, mirrored in
/// `fetch_with_cors_cache`).
#[derive(Clone, Debug, Deserialize, MallocSizeOf, Serialize)]
pub struct InFlightKeepAliveRecord {
    // Identifier of the in-flight request; used to remove this record once
    // the fetch reaches its "done" state.
    pub(crate) request_id: RequestId,
    /// Used to keep track of size of keep-alive requests.
    pub(crate) keep_alive_body_length: u64,
}

/// Thread-shared map from pipeline to the keep-alive requests currently in
/// flight for that pipeline.
pub type SharedInflightKeepAliveRecords =
    Arc<Mutex<FxHashMap<PipelineId, Vec<InFlightKeepAliveRecord>>>>;
|
||
|
||
/// Shared state and services available while running the fetch algorithm on
/// the resource thread. Cheap to clone: most fields are `Arc`-shared.
#[derive(Clone)]
pub struct FetchContext {
    /// Shared HTTP state; among other things it holds the HSTS list that is
    /// applied to request URLs during main fetch.
    pub state: Arc<HttpState>,
    /// The User-Agent string for outgoing requests.
    pub user_agent: String,
    /// Channel to the devtools server, if devtools is enabled.
    pub devtools_chan: Option<Sender<DevtoolsControlMsg>>,
    /// Handle to the file manager (presumably for file-/blob-backed
    /// resources — confirm against `scheme_fetch`).
    pub filemanager: FileManager,
    /// Token used for file-access checks with the file manager.
    pub file_token: FileTokenCheck,
    /// Interceptor that gets a chance to override the response before any
    /// scheme/network fetch happens (see `main_fetch`).
    pub request_interceptor: Arc<TokioMutex<RequestInterceptor>>,
    /// Tracks whether this fetch has been cancelled.
    pub cancellation_listener: Arc<CancellationListener>,
    /// Resource-timing data, updated as the fetch progresses.
    pub timing: ServoArc<Mutex<ResourceFetchTiming>>,
    /// Registry of protocol handlers; consulted for fetchable custom schemes.
    pub protocols: Arc<ProtocolRegistry>,
    /// WebSocket event channel, if this fetch is associated with a WebSocket.
    pub websocket_chan: Option<Arc<Mutex<WebSocketChannel>>>,
    /// CA certificates used for TLS validation.
    pub ca_certificates: CACertificates<'static>,
    /// When true, TLS certificate errors are ignored.
    pub ignore_certificate_errors: bool,
    /// Preloaded responses a request may consume instead of fetching anew.
    pub preloaded_resources: SharedPreloadedResources,
    /// Per-pipeline records of in-flight keep-alive requests.
    pub in_flight_keep_alive_records: SharedInflightKeepAliveRecords,
}
|
||
|
||
/// One-way cancellation flag shared between the fetch and whoever may cancel
/// it. Relaxed atomics suffice: only eventual visibility of the flag matters,
/// no ordering with other memory operations is required.
#[derive(Default)]
pub struct CancellationListener {
    cancelled: AtomicBool,
}

impl CancellationListener {
    /// Marks the associated fetch as cancelled. Irreversible.
    pub(crate) fn cancel(&self) {
        self.cancelled.store(true, Ordering::Relaxed);
    }

    /// Returns `true` once `cancel` has been observed.
    pub(crate) fn cancelled(&self) -> bool {
        self.cancelled.load(Ordering::Relaxed)
    }
}
|
||
|
||
/// Closes the current process request body sender state when the net side fetch invocation ends.
|
||
/// Redirect replay for navigation requests happens in a later fetch invocation with a newly
|
||
/// deserialized "RequestBody", so each invocation owns closing only its local copy.
|
||
pub(crate) struct AutoRequestBodyStreamCloser {
|
||
body: Option<RequestBody>,
|
||
}
|
||
|
||
impl AutoRequestBodyStreamCloser {
|
||
pub(crate) fn new(body: Option<&RequestBody>) -> Self {
|
||
Self {
|
||
body: body.cloned(),
|
||
}
|
||
}
|
||
|
||
pub(crate) fn disarm(&mut self) {
|
||
self.body = None;
|
||
}
|
||
}
|
||
|
||
impl Drop for AutoRequestBodyStreamCloser {
|
||
fn drop(&mut self) {
|
||
if let Some(body) = self.body.take() {
|
||
body.close_stream();
|
||
}
|
||
}
|
||
}
|
||
|
||
/// A manual navigation redirect keeps the same request body alive for a later net side redirect
|
||
/// replay invocation. That later invocation becomes the next lifecycle owner and must close its
|
||
/// local shared sender state once it reaches a terminal response.
|
||
pub(crate) fn transfers_request_body_stream_to_later_manual_redirect(
|
||
request: &Request,
|
||
response: &Response,
|
||
) -> bool {
|
||
request.mode == RequestMode::Navigate &&
|
||
request.redirect_mode == RedirectMode::Manual &&
|
||
request.body.is_some() &&
|
||
!response.is_network_error() &&
|
||
response
|
||
.actual_response()
|
||
.status
|
||
.try_code()
|
||
.is_some_and(|status| status.is_redirection())
|
||
}
|
||
|
||
/// Optional Tokio channel pair carrying [`Data`] chunks for an in-progress fetch.
pub type DoneChannel = Option<(TokioSender<Data>, TokioReceiver<Data>)>;
|
||
|
||
/// [Fetch](https://fetch.spec.whatwg.org#concept-fetch)
|
||
pub async fn fetch(request: Request, target: Target<'_>, context: &FetchContext) -> Response {
|
||
// Steps 7,4 of https://w3c.github.io/resource-timing/#processing-model
|
||
// rev order okay since spec says they're equal - https://w3c.github.io/resource-timing/#dfn-starttime
|
||
{
|
||
let mut timing_guard = context.timing.lock();
|
||
timing_guard.set_attribute(ResourceAttribute::FetchStart);
|
||
timing_guard.set_attribute(ResourceAttribute::StartTime(ResourceTimeValue::FetchStart));
|
||
}
|
||
fetch_with_cors_cache(request, &mut CorsCache::default(), target, context).await
|
||
}
|
||
|
||
/// Continuation of fetch from step 8.
///
/// <https://fetch.spec.whatwg.org#concept-fetch>
///
/// Runs main fetch with the given CORS `cache`, tracking keep-alive
/// subresource requests for the duration of the fetch, and ensuring this
/// invocation's request-body sender state is closed when it ends (unless
/// ownership transfers to a later manual-redirect replay).
pub async fn fetch_with_cors_cache(
    request: Request,
    cache: &mut CorsCache,
    target: Target<'_>,
    context: &FetchContext,
) -> Response {
    // Step 8. Let fetchParams be a new fetch params whose request is request
    let mut fetch_params = FetchParams::new(request);
    // Each net side fetch invocation owns closing its local deserialized request-body sender state
    // once this function returns, even if navigation redirect replay later starts a new fetch with
    // a fresh "RequestBody" copy.
    let mut request_body_stream_closer =
        AutoRequestBodyStreamCloser::new(fetch_params.request.body.as_ref());
    let request = &mut fetch_params.request;

    // Step 4. Populate request from client given request.
    request.populate_request_from_client();

    // Step 5. If request’s client is non-null, then:
    // TODO
    // Step 5.1. Set taskDestination to request’s client’s global object.
    // TODO
    // Step 5.2. Set crossOriginIsolatedCapability to request’s client’s cross-origin isolated capability.
    // TODO

    // Step 10. If all of the following conditions are true:
    if
    // - request’s URL’s scheme is an HTTP(S) scheme
    matches!(request.current_url().scheme(), "http" | "https")
    // - request’s mode is "same-origin", "cors", or "no-cors"
    && matches!(request.mode, RequestMode::SameOrigin | RequestMode::CorsMode | RequestMode::NoCors)
    // - request’s method is `GET`
    && matches!(request.method, Method::GET)
    // - request’s unsafe-request flag is not set or request’s header list is empty
    && (!request.unsafe_request || request.headers.is_empty())
    {
        // - request’s client is not null, and request’s client’s global object is a Window object
        if let Some(client) = request.client.as_ref() {
            // Step 10.1. Assert: request’s origin is same origin with request’s client’s origin.
            assert!(request.origin == client.origin);
            // Step 10.2. Let onPreloadedResponseAvailable be an algorithm that runs the
            // following step given a response response: set fetchParams’s preloaded response candidate to response.
            // Step 10.3. Let foundPreloadedResource be the result of invoking consume a preloaded resource
            // for request’s client, given request’s URL, request’s destination, request’s mode,
            // request’s credentials mode, request’s integrity metadata, and onPreloadedResponseAvailable.
            // Step 10.4. If foundPreloadedResource is true and fetchParams’s preloaded response candidate is null,
            // then set fetchParams’s preloaded response candidate to "pending".
            if let Some(candidate) =
                client.consume_preloaded_resource(request, context.preloaded_resources.clone())
            {
                fetch_params.preload_response_candidate = candidate;
            }
        }
    }

    // Step 11. If request’s header list does not contain `Accept`, then:
    set_default_accept(request);

    // Step 12. If request’s header list does not contain `Accept-Language`, then user agents should
    // append (`Accept-Language, an appropriate header value) to request’s header list.
    set_default_accept_language(&mut request.headers);

    // Step 15. If request’s internal priority is null, then use request’s priority, initiator,
    // destination, and render-blocking in an implementation-defined manner to set request’s
    // internal priority to an implementation-defined object.
    // TODO: figure out what a Priority object is.

    // Step 15. If request is a subresource request:
    //
    // We only check for keep-alive requests here, since that's currently the only usage
    let should_track_in_flight_record = request.keep_alive && request.is_subresource_request();
    let pipeline_id = request.pipeline_id;

    if should_track_in_flight_record {
        // Step 15.1. Let record be a new fetch record whose request is request
        // and controller is fetchParams’s controller.
        let record = InFlightKeepAliveRecord {
            request_id: request.id,
            keep_alive_body_length: request.keep_alive_body_length(),
        };
        // Step 15.2. Append record to request’s client’s fetch group’s fetch records.
        let mut in_flight_records = context.in_flight_keep_alive_records.lock();
        in_flight_records
            .entry(pipeline_id.expect("Must always set a pipeline ID for keep-alive requests"))
            .or_default()
            .push(record);
    };
    // Remember the id so the record can be removed once the fetch completes.
    let request_id = request.id;

    // Step 17: Run main fetch given fetchParams.
    let response = main_fetch(&mut fetch_params, cache, false, target, &mut None, context).await;

    // A manual-redirect navigation hands its body stream to the later replay
    // invocation, so this invocation must not close it.
    if transfers_request_body_stream_to_later_manual_redirect(&fetch_params.request, &response) {
        request_body_stream_closer.disarm();
    }

    // Mimics <https://fetch.spec.whatwg.org/#done-flag>
    if should_track_in_flight_record {
        context
            .in_flight_keep_alive_records
            .lock()
            .get_mut(&pipeline_id.expect("Must always set a pipeline ID for keep-alive requests"))
            .expect("Must always have initialized tracked requests before starting fetch")
            .retain(|record| record.request_id != request_id);
    }

    // Step 18: Return fetchParams’s controller.
    // TODO: We don't implement fetchParams as defined in the spec
    response
}
|
||
|
||
pub(crate) fn convert_request_to_csp_request(request: &Request) -> Option<csp::Request> {
|
||
let origin = match &request.origin {
|
||
Origin::Client => return None,
|
||
Origin::Origin(origin) => origin,
|
||
};
|
||
|
||
let csp_request = csp::Request {
|
||
url: request.url().into_url(),
|
||
current_url: request.current_url().into_url(),
|
||
origin: origin.clone().into_url_origin(),
|
||
redirect_count: request.redirect_count,
|
||
destination: request.destination,
|
||
initiator: match request.initiator {
|
||
Initiator::Download => csp::Initiator::Download,
|
||
Initiator::ImageSet => csp::Initiator::ImageSet,
|
||
Initiator::Manifest => csp::Initiator::Manifest,
|
||
Initiator::Prefetch => csp::Initiator::Prefetch,
|
||
_ => csp::Initiator::None,
|
||
},
|
||
nonce: request.cryptographic_nonce_metadata.clone(),
|
||
integrity_metadata: request.integrity_metadata.clone(),
|
||
parser_metadata: match request.parser_metadata {
|
||
ParserMetadata::ParserInserted => csp::ParserMetadata::ParserInserted,
|
||
ParserMetadata::NotParserInserted => csp::ParserMetadata::NotParserInserted,
|
||
ParserMetadata::Default => csp::ParserMetadata::None,
|
||
},
|
||
};
|
||
Some(csp_request)
|
||
}
|
||
|
||
/// <https://www.w3.org/TR/CSP/#should-block-request>
|
||
pub fn should_request_be_blocked_by_csp(
|
||
csp_request: &csp::Request,
|
||
policy_container: &PolicyContainer,
|
||
) -> (csp::CheckResult, Vec<csp::Violation>) {
|
||
policy_container
|
||
.csp_list
|
||
.as_ref()
|
||
.map(|c| c.should_request_be_blocked(csp_request))
|
||
.unwrap_or((csp::CheckResult::Allowed, Vec::new()))
|
||
}
|
||
|
||
/// <https://www.w3.org/TR/CSP/#report-for-request>
|
||
pub fn report_violations_for_request_by_csp(
|
||
csp_request: &csp::Request,
|
||
policy_container: &PolicyContainer,
|
||
) -> Vec<csp::Violation> {
|
||
policy_container
|
||
.csp_list
|
||
.as_ref()
|
||
.map(|c| c.report_violations_for_request(csp_request))
|
||
.unwrap_or_default()
|
||
}
|
||
|
||
fn should_response_be_blocked_by_csp(
|
||
csp_request: &csp::Request,
|
||
response: &Response,
|
||
policy_container: &PolicyContainer,
|
||
) -> (csp::CheckResult, Vec<csp::Violation>) {
|
||
if response.is_network_error() {
|
||
return (csp::CheckResult::Allowed, Vec::new());
|
||
}
|
||
let csp_response = csp::Response {
|
||
url: response
|
||
.actual_response()
|
||
.url()
|
||
.cloned()
|
||
// NOTE(pylbrecht): for WebSocket connections, the URL scheme is converted to http(s)
|
||
// to integrate with fetch(). We need to convert it back to ws(s) to get valid CSP
|
||
// checks.
|
||
// https://github.com/w3c/webappsec-csp/issues/532
|
||
.map(|mut url| {
|
||
match csp_request.url.scheme() {
|
||
"ws" | "wss" => {
|
||
url.as_mut_url()
|
||
.set_scheme(csp_request.url.scheme())
|
||
.expect("failed to set URL scheme");
|
||
},
|
||
_ => {},
|
||
};
|
||
url
|
||
})
|
||
.expect("response must have a url")
|
||
.into_url(),
|
||
redirect_count: csp_request.redirect_count,
|
||
};
|
||
policy_container
|
||
.csp_list
|
||
.as_ref()
|
||
.map(|c| c.should_response_to_request_be_blocked(csp_request, &csp_response))
|
||
.unwrap_or((csp::CheckResult::Allowed, Vec::new()))
|
||
}
|
||
|
||
/// [Main fetch](https://fetch.spec.whatwg.org/#concept-main-fetch)
|
||
pub async fn main_fetch(
|
||
fetch_params: &mut FetchParams,
|
||
cache: &mut CorsCache,
|
||
recursive_flag: bool,
|
||
target: Target<'_>,
|
||
done_chan: &mut DoneChannel,
|
||
context: &FetchContext,
|
||
) -> Response {
|
||
// Step 1: Let request be fetchParam's request.
|
||
let request = &mut fetch_params.request;
|
||
send_early_httprequest_to_devtools(request, context);
|
||
// Step 2: Let response be null.
|
||
let mut response = None;
|
||
|
||
// Servo internal: return a crash error when a crash error page is needed
|
||
if let Some(ref details) = request.crash {
|
||
response = Some(Response::network_error(NetworkError::Crash(
|
||
details.clone(),
|
||
)));
|
||
}
|
||
|
||
// Step 3: If request’s local-URLs-only flag is set and request’s
|
||
// current URL is not local, then set response to a network error.
|
||
if request.local_urls_only &&
|
||
!matches!(
|
||
request.current_url().scheme(),
|
||
"about" | "blob" | "data" | "filesystem"
|
||
)
|
||
{
|
||
response = Some(Response::network_error(NetworkError::UnsupportedScheme));
|
||
}
|
||
|
||
// The request should have a valid policy_container associated with it.
|
||
let policy_container = match &request.policy_container {
|
||
RequestPolicyContainer::Client => unreachable!(),
|
||
RequestPolicyContainer::PolicyContainer(container) => container.to_owned(),
|
||
};
|
||
let csp_request = convert_request_to_csp_request(request);
|
||
if let Some(csp_request) = csp_request.as_ref() {
|
||
// Step 2.2.
|
||
let violations = report_violations_for_request_by_csp(csp_request, &policy_container);
|
||
|
||
if !violations.is_empty() {
|
||
target.process_csp_violations(request, violations);
|
||
}
|
||
};
|
||
|
||
// Step 3.
|
||
// TODO: handle request abort.
|
||
|
||
// Step 4. Upgrade request to a potentially trustworthy URL, if appropriate.
|
||
if should_upgrade_request_to_potentially_trustworthy(request, context) ||
|
||
should_upgrade_mixed_content_request(request, &context.protocols)
|
||
{
|
||
trace!(
|
||
"upgrading {} targeting {:?}",
|
||
request.current_url(),
|
||
request.destination
|
||
);
|
||
if let Some(new_scheme) = match request.current_url().scheme() {
|
||
"http" => Some("https"),
|
||
"ws" => Some("wss"),
|
||
_ => None,
|
||
} {
|
||
request
|
||
.current_url_mut()
|
||
.as_mut_url()
|
||
.set_scheme(new_scheme)
|
||
.unwrap();
|
||
}
|
||
} else {
|
||
trace!(
|
||
"not upgrading {} targeting {:?} with {:?}",
|
||
request.current_url(),
|
||
request.destination,
|
||
request.insecure_requests_policy
|
||
);
|
||
}
|
||
if let Some(csp_request) = csp_request.as_ref() {
|
||
// Step 7. If should request be blocked due to a bad port, should fetching request be blocked
|
||
// as mixed content, or should request be blocked by Content Security Policy returns blocked,
|
||
// then set response to a network error.
|
||
let (check_result, violations) =
|
||
should_request_be_blocked_by_csp(csp_request, &policy_container);
|
||
|
||
if !violations.is_empty() {
|
||
target.process_csp_violations(request, violations);
|
||
}
|
||
|
||
if check_result == csp::CheckResult::Blocked {
|
||
warn!("Request blocked by CSP");
|
||
response = Some(Response::network_error(NetworkError::ContentSecurityPolicy))
|
||
}
|
||
};
|
||
if should_request_be_blocked_due_to_a_bad_port(&request.current_url()) {
|
||
response = Some(Response::network_error(NetworkError::InvalidPort));
|
||
}
|
||
if should_request_be_blocked_as_mixed_content(request, &context.protocols) {
|
||
response = Some(Response::network_error(NetworkError::MixedContent));
|
||
}
|
||
|
||
// Step 8: If request’s referrer policy is the empty string, then set request’s referrer policy
|
||
// to request’s policy container’s referrer policy.
|
||
if request.referrer_policy == ReferrerPolicy::EmptyString {
|
||
request.referrer_policy = policy_container.get_referrer_policy();
|
||
}
|
||
|
||
let referrer_url = match mem::replace(&mut request.referrer, Referrer::NoReferrer) {
|
||
Referrer::NoReferrer => None,
|
||
Referrer::ReferrerUrl(referrer_source) | Referrer::Client(referrer_source) => {
|
||
request.headers.remove(header::REFERER);
|
||
determine_requests_referrer(
|
||
request.referrer_policy,
|
||
referrer_source,
|
||
request.current_url(),
|
||
)
|
||
},
|
||
};
|
||
request.referrer = referrer_url.map_or(Referrer::NoReferrer, Referrer::ReferrerUrl);
|
||
|
||
// Step 9.
|
||
// TODO: handle FTP URLs.
|
||
|
||
// Step 10.
|
||
context
|
||
.state
|
||
.hsts_list
|
||
.read()
|
||
.apply_hsts_rules(request.current_url_mut());
|
||
|
||
// Step 11.
|
||
// Not applicable: see fetch_async.
|
||
|
||
let current_url = request.current_url();
|
||
let current_scheme = current_url.scheme();
|
||
|
||
// Intercept the request and maybe override the response.
|
||
context
|
||
.request_interceptor
|
||
.lock()
|
||
.await
|
||
.intercept_request(request, &mut response, context)
|
||
.await;
|
||
|
||
let mut response = match response {
|
||
Some(response) => response,
|
||
None => {
|
||
// Step 12. If response is null, then set response to the result
|
||
// of running the steps corresponding to the first matching statement:
|
||
let same_origin = if let Origin::Origin(ref origin) = request.origin {
|
||
*origin == request.current_url_with_blob_claim().origin()
|
||
} else {
|
||
false
|
||
};
|
||
|
||
// fetchParams’s preloaded response candidate is non-null
|
||
if let Some((response, preload_id)) =
|
||
fetch_params.preload_response_candidate.response().await
|
||
{
|
||
response.get_resource_timing().lock().preloaded = true;
|
||
context
|
||
.preloaded_resources
|
||
.lock()
|
||
.unwrap()
|
||
.remove(&preload_id);
|
||
response
|
||
}
|
||
// request's current URL's origin is same origin with request's origin, and request's
|
||
// response tainting is "basic"
|
||
else if (same_origin && request.response_tainting == ResponseTainting::Basic) ||
|
||
// request's current URL's scheme is "data"
|
||
current_scheme == "data" ||
|
||
// Note: Although it is not part of the specification, we make an exception here
|
||
// for custom protocols that are explicitly marked as active for fetch.
|
||
context.protocols.is_fetchable(current_scheme) ||
|
||
// request's mode is "navigate" or "websocket"
|
||
matches!(
|
||
request.mode,
|
||
RequestMode::Navigate | RequestMode::WebSocket { .. }
|
||
)
|
||
{
|
||
// Substep 1. Set request's response tainting to "basic".
|
||
request.response_tainting = ResponseTainting::Basic;
|
||
|
||
// Substep 2. Return the result of running scheme fetch given fetchParams.
|
||
scheme_fetch(fetch_params, cache, target, done_chan, context).await
|
||
} else if request.mode == RequestMode::SameOrigin {
|
||
Response::network_error(NetworkError::CrossOriginResponse)
|
||
} else if request.mode == RequestMode::NoCors {
|
||
// Substep 1. If request's redirect mode is not "follow", then return a network error.
|
||
if request.redirect_mode != RedirectMode::Follow {
|
||
Response::network_error(NetworkError::RedirectError)
|
||
} else {
|
||
// Substep 2. Set request's response tainting to "opaque".
|
||
request.response_tainting = ResponseTainting::Opaque;
|
||
|
||
// Substep 3. Return the result of running scheme fetch given fetchParams.
|
||
scheme_fetch(fetch_params, cache, target, done_chan, context).await
|
||
}
|
||
} else if !matches!(current_scheme, "http" | "https") {
|
||
Response::network_error(NetworkError::UnsupportedScheme)
|
||
} else if request.use_cors_preflight ||
|
||
(request.unsafe_request &&
|
||
(!is_cors_safelisted_method(&request.method) ||
|
||
request.headers.iter().any(|(name, value)| {
|
||
!is_cors_safelisted_request_header(&name, &value)
|
||
})))
|
||
{
|
||
// Substep 1.
|
||
request.response_tainting = ResponseTainting::CorsTainting;
|
||
// Substep 2.
|
||
let response = http_fetch(
|
||
fetch_params,
|
||
cache,
|
||
true,
|
||
true,
|
||
false,
|
||
target,
|
||
done_chan,
|
||
context,
|
||
)
|
||
.await;
|
||
// Substep 3.
|
||
if response.is_network_error() {
|
||
// TODO clear cache entries using request
|
||
}
|
||
// Substep 4.
|
||
response
|
||
} else {
|
||
// Substep 1.
|
||
request.response_tainting = ResponseTainting::CorsTainting;
|
||
// Substep 2.
|
||
http_fetch(
|
||
fetch_params,
|
||
cache,
|
||
true,
|
||
false,
|
||
false,
|
||
target,
|
||
done_chan,
|
||
context,
|
||
)
|
||
.await
|
||
}
|
||
},
|
||
};
|
||
|
||
// Step 13. If recursive is true, then return response.
|
||
if recursive_flag {
|
||
return response;
|
||
}
|
||
|
||
// reborrow request to avoid double mutable borrow
|
||
let request = &mut fetch_params.request;
|
||
|
||
// Step 14. If response is not a network error and response is not a filtered response, then:
|
||
let mut response = if !response.is_network_error() && response.internal_response.is_none() {
|
||
// Step 14.1 If request’s response tainting is "cors", then:
|
||
if request.response_tainting == ResponseTainting::CorsTainting {
|
||
// Step 14.1.1 Let headerNames be the result of extracting header list values given
|
||
// `Access-Control-Expose-Headers` and response’s header list.
|
||
let header_names: Option<Vec<HeaderName>> = response
|
||
.headers
|
||
.typed_get::<AccessControlExposeHeaders>()
|
||
.map(|v| v.iter().collect());
|
||
match header_names {
|
||
// Subsubstep 2.
|
||
Some(ref list)
|
||
if request.credentials_mode != CredentialsMode::Include &&
|
||
list.iter().any(|header| header == "*") =>
|
||
{
|
||
response.cors_exposed_header_name_list = response
|
||
.headers
|
||
.iter()
|
||
.map(|(name, _)| name.as_str().to_owned())
|
||
.collect();
|
||
},
|
||
// Subsubstep 3.
|
||
Some(list) => {
|
||
response.cors_exposed_header_name_list =
|
||
list.iter().map(|h| h.as_str().to_owned()).collect();
|
||
},
|
||
_ => (),
|
||
}
|
||
}
|
||
|
||
// Step 14.2 Set response to the following filtered response with response as its internal response,
|
||
// depending on request’s response tainting:
|
||
let response_type = match request.response_tainting {
|
||
ResponseTainting::Basic => ResponseType::Basic,
|
||
ResponseTainting::CorsTainting => ResponseType::Cors,
|
||
ResponseTainting::Opaque => ResponseType::Opaque,
|
||
};
|
||
response.to_filtered(response_type)
|
||
} else {
|
||
response
|
||
};
|
||
|
||
let internal_error = {
|
||
// Tests for steps 17 and 18, before step 15 for borrowing concerns.
|
||
let response_is_network_error = response.is_network_error();
|
||
let should_replace_with_nosniff_error = !response_is_network_error &&
|
||
should_be_blocked_due_to_nosniff(request.destination, &response.headers);
|
||
let should_replace_with_mime_type_error = !response_is_network_error &&
|
||
should_be_blocked_due_to_mime_type(request.destination, &response.headers);
|
||
let should_replace_with_mixed_content = !response_is_network_error &&
|
||
should_response_be_blocked_as_mixed_content(request, &response, &context.protocols);
|
||
let should_replace_with_csp_error = csp_request.is_some_and(|csp_request| {
|
||
let (check_result, violations) =
|
||
should_response_be_blocked_by_csp(&csp_request, &response, &policy_container);
|
||
if !violations.is_empty() {
|
||
target.process_csp_violations(request, violations);
|
||
}
|
||
check_result == csp::CheckResult::Blocked
|
||
});
|
||
|
||
// Step 15.
|
||
let mut network_error_response = response
|
||
.get_network_error()
|
||
.cloned()
|
||
.map(Response::network_error);
|
||
|
||
// Step 15. Let internalResponse be response, if response is a network error;
|
||
// otherwise response’s internal response.
|
||
let response_type = response.response_type.clone(); // Needed later after the mutable borrow
|
||
let internal_response = if let Some(error_response) = network_error_response.as_mut() {
|
||
error_response
|
||
} else {
|
||
response.actual_response_mut()
|
||
};
|
||
|
||
// Step 16. If internalResponse’s URL list is empty, then set it to a clone of request’s URL list.
|
||
if internal_response.url_list.is_empty() {
|
||
internal_response.url_list = request
|
||
.url_list
|
||
.iter()
|
||
.map(|locked_url| locked_url.url())
|
||
.collect();
|
||
}
|
||
|
||
// Step 17. Set internalResponse’s redirect taint to request’s redirect-taint.
|
||
internal_response.redirect_taint = request.redirect_taint_for_request();
|
||
|
||
// Step 19. If response is not a network error and any of the following returns blocked
|
||
// * should internalResponse to request be blocked as mixed content
|
||
// * should internalResponse to request be blocked by Content Security Policy
|
||
// * should internalResponse to request be blocked due to its MIME type
|
||
// * should internalResponse to request be blocked due to nosniff
|
||
let mut blocked_error_response;
|
||
|
||
let internal_response = if should_replace_with_nosniff_error {
|
||
// Defer rebinding result
|
||
blocked_error_response = Response::network_error(NetworkError::Nosniff);
|
||
&blocked_error_response
|
||
} else if should_replace_with_mime_type_error {
|
||
// Defer rebinding result
|
||
blocked_error_response =
|
||
Response::network_error(NetworkError::MimeType("Blocked by MIME type".into()));
|
||
&blocked_error_response
|
||
} else if should_replace_with_mixed_content {
|
||
blocked_error_response = Response::network_error(NetworkError::MixedContent);
|
||
&blocked_error_response
|
||
} else if should_replace_with_csp_error {
|
||
blocked_error_response = Response::network_error(NetworkError::ContentSecurityPolicy);
|
||
&blocked_error_response
|
||
} else {
|
||
internal_response
|
||
};
|
||
|
||
// Step 20. If response’s type is "opaque", internalResponse’s status is 206, internalResponse’s
|
||
// range-requested flag is set, and request’s header list does not contain `Range`, then set
|
||
// response and internalResponse to a network error.
|
||
// Also checking if internal response is a network error to prevent crash from attemtping to
|
||
// read status of a network error if we blocked the request above.
|
||
let internal_response = if !internal_response.is_network_error() &&
|
||
response_type == ResponseType::Opaque &&
|
||
internal_response.status.code() == StatusCode::PARTIAL_CONTENT &&
|
||
internal_response.range_requested &&
|
||
!request.headers.contains_key(RANGE)
|
||
{
|
||
// Defer rebinding result
|
||
blocked_error_response =
|
||
Response::network_error(NetworkError::PartialResponseToNonRangeRequestError);
|
||
&blocked_error_response
|
||
} else {
|
||
internal_response
|
||
};
|
||
|
||
// Step 21. If response is not a network error and either request’s method is `HEAD` or `CONNECT`,
|
||
// or internalResponse’s status is a null body status, set internalResponse’s body to null and
|
||
// disregard any enqueuing toward it (if any).
|
||
// NOTE: We check `internal_response` since we did not mutate `response` in the previous steps.
|
||
let not_network_error = !response_is_network_error && !internal_response.is_network_error();
|
||
if not_network_error &&
|
||
(is_null_body_status(&internal_response.status) ||
|
||
matches!(request.method, Method::HEAD | Method::CONNECT))
|
||
{
|
||
// when Fetch is used only asynchronously, we will need to make sure
|
||
// that nothing tries to write to the body at this point
|
||
let mut body = internal_response.body.lock();
|
||
*body = ResponseBody::Empty;
|
||
}
|
||
|
||
internal_response.get_network_error().cloned()
|
||
};
|
||
|
||
// Execute deferred rebinding of response.
|
||
let mut response = if let Some(error) = internal_error {
|
||
Response::network_error(error)
|
||
} else {
|
||
response
|
||
};
|
||
|
||
// Step 19. If response is not a network error and any of the following returns blocked
|
||
let mut response_loaded = false;
|
||
let mut response = if !response.is_network_error() && !request.integrity_metadata.is_empty() {
|
||
// Step 19.1.
|
||
wait_for_response(request, &mut response, target, done_chan, context).await;
|
||
response_loaded = true;
|
||
|
||
// Step 19.2.
|
||
let integrity_metadata = &request.integrity_metadata;
|
||
if response.termination_reason.is_none() &&
|
||
!is_response_integrity_valid(integrity_metadata, &response)
|
||
{
|
||
Response::network_error(NetworkError::SubresourceIntegrity)
|
||
} else {
|
||
response
|
||
}
|
||
} else {
|
||
response
|
||
};
|
||
|
||
// Step 20.
|
||
if request.synchronous {
|
||
// process_response is not supposed to be used
|
||
// by sync fetch, but we overload it here for simplicity
|
||
target.process_response(request, &response);
|
||
if !response_loaded {
|
||
wait_for_response(request, &mut response, target, done_chan, context).await;
|
||
}
|
||
// overloaded similarly to process_response
|
||
target.process_response_eof(request, &response);
|
||
return response;
|
||
}
|
||
|
||
// Step 21.
|
||
if request.body.is_some() && matches!(current_scheme, "http" | "https") {
|
||
// XXXManishearth: We actually should be calling process_request
|
||
// in http_network_fetch. However, we can't yet follow the request
|
||
// upload progress, so I'm keeping it here for now and pretending
|
||
// the body got sent in one chunk
|
||
target.process_request_body(request);
|
||
}
|
||
|
||
// Step 22.
|
||
target.process_response(request, &response);
|
||
// Send Response to Devtools
|
||
send_response_to_devtools(request, context, &response, None);
|
||
send_security_info_to_devtools(request, context, &response);
|
||
|
||
// Step 23.
|
||
if !response_loaded {
|
||
wait_for_response(request, &mut response, target, done_chan, context).await;
|
||
}
|
||
|
||
// Step 24.
|
||
target.process_response_eof(request, &response);
|
||
// Send Response to Devtools
|
||
// This is done after process_response_eof to ensure that the body is fully
|
||
// processed before sending the response to Devtools.
|
||
send_response_to_devtools(request, context, &response, None);
|
||
|
||
context
|
||
.state
|
||
.http_cache
|
||
.update_awaiting_consumers(request, &response)
|
||
.await;
|
||
|
||
// Steps 25-27.
|
||
// TODO: remove this line when only asynchronous fetches are used
|
||
response
|
||
}
|
||
|
||
/// Drive `response`'s body to completion, forwarding every body chunk to `target`.
///
/// When `done_chan` holds a channel, the body is being produced asynchronously by a
/// fetch worker: this loop receives `Data` messages until the worker reports
/// `Done`, `Error`, or `Cancelled`. When there is no channel, the body was already
/// produced synchronously (data/file/about/... schemes) and is replayed to
/// `target` as a single chunk.
async fn wait_for_response(
    request: &Request,
    response: &mut Response,
    target: Target<'_>,
    done_chan: &mut DoneChannel,
    context: &FetchContext,
) {
    if let Some(ref mut ch) = *done_chan {
        // Accumulate a copy of the body only when a DevTools client is connected,
        // so the full payload can be reported once the stream finishes.
        let mut devtools_body = context.devtools_chan.as_ref().map(|_| Vec::new());
        loop {
            match ch.1.recv().await {
                Some(Data::Payload(vec)) => {
                    if let Some(body) = devtools_body.as_mut() {
                        body.extend(&vec);
                    }
                    target.process_response_chunk(request, vec);
                },
                Some(Data::Error(network_error)) => {
                    // A decompression failure mid-stream is unrecoverable;
                    // mark the response as fatally terminated.
                    if network_error == NetworkError::DecompressionError {
                        response.termination_reason = Some(TerminationReason::Fatal);
                    }
                    response.set_network_error(network_error);

                    break;
                },
                Some(Data::Done) => {
                    send_response_to_devtools(request, context, response, devtools_body);
                    break;
                },
                Some(Data::Cancelled) => {
                    response.aborted.store(true, Ordering::Release);
                    break;
                },
                _ => {
                    // `None` (sender dropped without a final message) is a worker bug.
                    panic!("fetch worker should always send Done before terminating");
                },
            }
        }
    } else {
        match *response.actual_response().body.lock() {
            ResponseBody::Done(ref vec) if !vec.is_empty() => {
                // in case there was no channel to wait for, the body was
                // obtained synchronously via scheme_fetch for data/file/about/etc
                // We should still send the body across as a chunk
                target.process_response_chunk(request, vec.clone());
                if context.devtools_chan.is_some() {
                    // Now that we've replayed the entire cached body,
                    // notify the DevTools server with the full Response.
                    send_response_to_devtools(request, context, response, Some(vec.clone()));
                }
            },
            ResponseBody::Done(_) | ResponseBody::Empty => {},
            // A synchronous fetch must have fully materialized its body by now.
            _ => unreachable!(),
        }
    }
}
|
||
|
||
/// Range header start and end values.
pub enum RangeRequestBounds {
    /// The range bounds are known and set to final values.
    Final(RelativePos),
    /// We need extra information to set the range bounds.
    /// i.e. buffer or file size.
    ///
    /// The payload is an offset from the end: `get_final` resolves
    /// `Pending(offset)` to a range covering the last `offset` bytes
    /// of the resource (or the whole resource when the length is unknown).
    Pending(u64),
}
|
||
|
||
impl RangeRequestBounds {
|
||
pub fn get_final(&self, len: Option<u64>) -> Result<RelativePos, &'static str> {
|
||
match self {
|
||
RangeRequestBounds::Final(pos) => {
|
||
if let Some(len) = len {
|
||
if pos.start <= len as i64 {
|
||
return Ok(*pos);
|
||
}
|
||
}
|
||
Err("Tried to process RangeRequestBounds::Final without len")
|
||
},
|
||
RangeRequestBounds::Pending(offset) => Ok(RelativePos::from_opts(
|
||
if let Some(len) = len {
|
||
Some((len - u64::min(len, *offset)) as i64)
|
||
} else {
|
||
Some(0)
|
||
},
|
||
None,
|
||
)),
|
||
}
|
||
}
|
||
}
|
||
|
||
fn create_blank_reply(url: ServoUrl, timing_type: ResourceTimingType) -> Response {
|
||
let mut response = Response::new(url, ResourceFetchTiming::new(timing_type));
|
||
response
|
||
.headers
|
||
.typed_insert(ContentType::from(mime::TEXT_HTML_UTF_8));
|
||
*response.body.lock() = ResponseBody::Done(vec![]);
|
||
response.status = HttpStatus::default();
|
||
response
|
||
}
|
||
|
||
fn create_about_memory(url: ServoUrl, timing_type: ResourceTimingType) -> Response {
|
||
let mut response = Response::new(url, ResourceFetchTiming::new(timing_type));
|
||
response
|
||
.headers
|
||
.typed_insert(ContentType::from(mime::TEXT_HTML_UTF_8));
|
||
*response.body.lock() = ResponseBody::Done(resources::read_bytes(Resource::AboutMemoryHTML));
|
||
response.status = HttpStatus::default();
|
||
response
|
||
}
|
||
|
||
/// Handle a request from the user interface to ignore validation errors for a certificate.
///
/// The request body must arrive in a single chunk laid out as
/// `<secret>&<base64 DER certificate>`. The secret must match
/// `net_traits::PRIVILEGED_SECRET`, so only privileged internal callers can
/// register an override. On success the certificate is added to the
/// override manager; all failures are reported as `io::Error`s.
fn handle_allowcert_request(request: &mut Request, context: &FetchContext) -> io::Result<()> {
    // Small helper so each failure site reads as `return error("...")`.
    let error = |string| Err(io::Error::other(string));

    let body = match request.body.as_mut() {
        Some(body) => body,
        None => return error("No body found"),
    };

    // Connect to the request body stream and explicitly request its first chunk.
    let stream = body.clone_stream();
    let mut stream = stream.lock();
    let (body_chan, body_port) = ipc::channel().unwrap();
    let Some(chunk_requester) = stream.as_mut() else {
        log::error!(
            "Could not connect to the request body stream because it has already been closed."
        );
        return Err(std::io::Error::other("Could not send BodyChunkRequest"));
    };
    chunk_requester
        .send(BodyChunkRequest::Connect(body_chan))
        .map_err(|error| {
            log::error!(
                "Could not connect to the request body stream because it has already been closed: {error}"
            );
            std::io::Error::other("Could not connect to request body stream")
        })?;
    chunk_requester
        .send(BodyChunkRequest::Chunk)
        .map_err(|error| {
            log::error!(
                "Could not request the first request body chunk because the body stream has already been closed: {error}"
            );
            std::io::Error::other("Could not request request body chunk")
        })?;
    let body_bytes = match body_port.recv().ok() {
        Some(BodyChunkResponse::Chunk(bytes)) => bytes,
        _ => return error("Certificate not sent in a single chunk"),
    };

    // Split the payload at the first '&' into secret and certificate parts.
    let split_idx = match body_bytes.iter().position(|b| *b == b'&') {
        Some(split_idx) => split_idx,
        None => return error("Could not find ampersand in data"),
    };
    let (secret, cert_base64) = body_bytes.split_at(split_idx);

    let secret = str::from_utf8(secret).ok().and_then(|s| s.parse().ok());
    if secret != Some(*net_traits::PRIVILEGED_SECRET) {
        return error("Invalid secret sent. Ignoring request");
    }

    // `split_at` leaves the '&' attached to the certificate half; skip it.
    let cert_bytes = match general_purpose::STANDARD_NO_PAD.decode(&cert_base64[1..]) {
        Ok(bytes) => bytes,
        Err(_) => return error("Could not decode certificate base64"),
    };

    context
        .state
        .override_manager
        .add_override(&CertificateDer::from_slice(&cert_bytes).into_owned());
    Ok(())
}
|
||
|
||
/// [Scheme fetch](https://fetch.spec.whatwg.org#scheme-fetch)
///
/// Dispatches the request on its URL scheme: `about:` and `chrome:` pages are
/// synthesized locally, HTTP(S) goes through `http_fetch`, and every other
/// scheme is handed to a registered protocol handler (unknown schemes yield a
/// network error).
async fn scheme_fetch(
    fetch_params: &mut FetchParams,
    cache: &mut CorsCache,
    target: Target<'_>,
    done_chan: &mut DoneChannel,
    context: &FetchContext,
) -> Response {
    // Step 1: If fetchParams is canceled, then return the appropriate network error for fetchParams.

    // Step 2: Let request be fetchParams’s request.
    let request = &mut fetch_params.request;
    // Hold a claim on the blob backing the URL (if any) for the duration of the fetch.
    let url_and_blob_lock = request.current_url_with_blob_claim();

    let scheme = url_and_blob_lock.scheme();
    match scheme {
        // about:blank — an empty text/html document.
        "about" if url_and_blob_lock.path() == "blank" => {
            create_blank_reply(url_and_blob_lock.url(), request.timing_type())
        },
        // about:memory — the built-in memory-profiling page.
        "about" if url_and_blob_lock.path() == "memory" => {
            create_about_memory(url_and_blob_lock.url(), request.timing_type())
        },

        // chrome:allowcert — privileged endpoint to register certificate overrides;
        // always replies with a blank page, even on failure.
        "chrome" if url_and_blob_lock.path() == "allowcert" => {
            if let Err(error) = handle_allowcert_request(request, context) {
                warn!("Could not handle allowcert request: {error}");
            }
            create_blank_reply(url_and_blob_lock.url(), request.timing_type())
        },

        // HTTP(S) fetch with all boolean flags initially unset — see
        // `http_fetch`'s signature for their meanings.
        "http" | "https" => {
            http_fetch(
                fetch_params,
                cache,
                false,
                false,
                false,
                target,
                done_chan,
                context,
            )
            .await
        },

        // data/file/blob/... are served by registered protocol handlers.
        _ => match context.protocols.get(scheme) {
            Some(handler) => handler.load(request, done_chan, context).await,
            None => Response::network_error(NetworkError::UnsupportedScheme),
        },
    }
}
|
||
|
||
fn is_null_body_status(status: &HttpStatus) -> bool {
|
||
matches!(
|
||
status.try_code(),
|
||
Some(StatusCode::SWITCHING_PROTOCOLS) |
|
||
Some(StatusCode::NO_CONTENT) |
|
||
Some(StatusCode::RESET_CONTENT) |
|
||
Some(StatusCode::NOT_MODIFIED)
|
||
)
|
||
}
|
||
|
||
/// <https://fetch.spec.whatwg.org/#should-response-to-request-be-blocked-due-to-nosniff?>
|
||
pub fn should_be_blocked_due_to_nosniff(
|
||
destination: Destination,
|
||
response_headers: &HeaderMap,
|
||
) -> bool {
|
||
// Step 1
|
||
if !determine_nosniff(response_headers) {
|
||
return false;
|
||
}
|
||
|
||
// Step 2
|
||
// Note: an invalid MIME type will produce a `None`.
|
||
let mime_type = extract_mime_type_as_mime(response_headers);
|
||
|
||
/// <https://html.spec.whatwg.org/multipage/#scriptingLanguages>
|
||
#[inline]
|
||
fn is_javascript_mime_type(mime_type: &Mime) -> bool {
|
||
let javascript_mime_types: [Mime; 16] = [
|
||
"application/ecmascript".parse().unwrap(),
|
||
"application/javascript".parse().unwrap(),
|
||
"application/x-ecmascript".parse().unwrap(),
|
||
"application/x-javascript".parse().unwrap(),
|
||
"text/ecmascript".parse().unwrap(),
|
||
"text/javascript".parse().unwrap(),
|
||
"text/javascript1.0".parse().unwrap(),
|
||
"text/javascript1.1".parse().unwrap(),
|
||
"text/javascript1.2".parse().unwrap(),
|
||
"text/javascript1.3".parse().unwrap(),
|
||
"text/javascript1.4".parse().unwrap(),
|
||
"text/javascript1.5".parse().unwrap(),
|
||
"text/jscript".parse().unwrap(),
|
||
"text/livescript".parse().unwrap(),
|
||
"text/x-ecmascript".parse().unwrap(),
|
||
"text/x-javascript".parse().unwrap(),
|
||
];
|
||
|
||
javascript_mime_types
|
||
.iter()
|
||
.any(|mime| mime.type_() == mime_type.type_() && mime.subtype() == mime_type.subtype())
|
||
}
|
||
|
||
match mime_type {
|
||
// Step 4
|
||
Some(ref mime_type) if destination.is_script_like() => !is_javascript_mime_type(mime_type),
|
||
// Step 5
|
||
Some(ref mime_type) if destination == Destination::Style => {
|
||
mime_type.type_() != mime::TEXT && mime_type.subtype() != mime::CSS
|
||
},
|
||
|
||
None if destination == Destination::Style || destination.is_script_like() => true,
|
||
// Step 6
|
||
_ => false,
|
||
}
|
||
}
|
||
|
||
/// <https://fetch.spec.whatwg.org/#should-response-to-request-be-blocked-due-to-mime-type?>
|
||
fn should_be_blocked_due_to_mime_type(
|
||
destination: Destination,
|
||
response_headers: &HeaderMap,
|
||
) -> bool {
|
||
// Step 1: Let mimeType be the result of extracting a MIME type from response’s header list.
|
||
let mime_type: mime::Mime = match extract_mime_type_as_mime(response_headers) {
|
||
Some(mime_type) => mime_type,
|
||
// Step 2: If mimeType is failure, then return allowed.
|
||
None => return false,
|
||
};
|
||
|
||
// Step 3: Let destination be request’s destination.
|
||
// Step 4: If destination is script-like and one of the following is true, then return blocked:
|
||
// - mimeType’s essence starts with "audio/", "image/", or "video/".
|
||
// - mimeType’s essence is "text/csv".
|
||
// Step 5: Return allowed.
|
||
destination.is_script_like() &&
|
||
match mime_type.type_() {
|
||
mime::AUDIO | mime::VIDEO | mime::IMAGE => true,
|
||
mime::TEXT if mime_type.subtype() == mime::CSV => true,
|
||
_ => false,
|
||
}
|
||
}
|
||
|
||
/// <https://fetch.spec.whatwg.org/#block-bad-port>
|
||
pub fn should_request_be_blocked_due_to_a_bad_port(url: &ServoUrl) -> bool {
|
||
// Step 1. Let url be request’s current URL.
|
||
// NOTE: We receive the request url as an argument
|
||
|
||
// Step 2. If url’s scheme is an HTTP(S) scheme and url’s port is a bad port, then return blocked.
|
||
let is_http_scheme = matches!(url.scheme(), "http" | "https");
|
||
let is_bad_port = url.port().is_some_and(is_bad_port);
|
||
if is_http_scheme && is_bad_port {
|
||
return true;
|
||
}
|
||
|
||
// Step 3. Return allowed.
|
||
false
|
||
}
|
||
|
||
/// <https://w3c.github.io/webappsec-mixed-content/#should-block-fetch>
|
||
pub fn should_request_be_blocked_as_mixed_content(
|
||
request: &Request,
|
||
protocol_registry: &ProtocolRegistry,
|
||
) -> bool {
|
||
// Step 1. Return allowed if one or more of the following conditions are met:
|
||
// 1.1. Does settings prohibit mixed security contexts?
|
||
// returns "Does Not Restrict Mixed Security Contexts" when applied to request’s client.
|
||
if do_settings_prohibit_mixed_security_contexts(request) ==
|
||
MixedSecurityProhibited::NotProhibited
|
||
{
|
||
return false;
|
||
}
|
||
|
||
// 1.2. request’s URL is a potentially trustworthy URL.
|
||
if is_url_potentially_trustworthy(protocol_registry, &request.current_url()) {
|
||
return false;
|
||
}
|
||
|
||
// 1.3. The user agent has been instructed to allow mixed content.
|
||
|
||
// 1.4. request’s destination is "document", and request’s target browsing context has
|
||
// no parent browsing context.
|
||
if request.destination == Destination::Document {
|
||
// TODO: request's target browsing context has no parent browsing context
|
||
return false;
|
||
}
|
||
|
||
true
|
||
}
|
||
|
||
/// <https://w3c.github.io/webappsec-mixed-content/#should-block-response>
|
||
pub fn should_response_be_blocked_as_mixed_content(
|
||
request: &Request,
|
||
response: &Response,
|
||
protocol_registry: &ProtocolRegistry,
|
||
) -> bool {
|
||
// Step 1. Return allowed if one or more of the following conditions are met:
|
||
// 1.1. Does settings prohibit mixed security contexts? returns Does Not Restrict Mixed Content
|
||
// when applied to request’s client.
|
||
if do_settings_prohibit_mixed_security_contexts(request) ==
|
||
MixedSecurityProhibited::NotProhibited
|
||
{
|
||
return false;
|
||
}
|
||
|
||
// 1.2. response’s url is a potentially trustworthy URL.
|
||
if response
|
||
.actual_response()
|
||
.url()
|
||
.is_some_and(|response_url| is_url_potentially_trustworthy(protocol_registry, response_url))
|
||
{
|
||
return false;
|
||
}
|
||
|
||
// 1.3. TODO: The user agent has been instructed to allow mixed content.
|
||
|
||
// 1.4. request’s destination is "document", and request’s target browsing context
|
||
// has no parent browsing context.
|
||
if request.destination == Destination::Document {
|
||
// TODO: if requests target browsing context has no parent browsing context
|
||
return false;
|
||
}
|
||
|
||
true
|
||
}
|
||
|
||
/// <https://fetch.spec.whatwg.org/#bad-port>
///
/// Whether `port` belongs to the Fetch Standard's list of ports that HTTP(S)
/// fetches must refuse to contact.
fn is_bad_port(port: u16) -> bool {
    // Kept in ascending order so membership can be decided with a binary search.
    const BLOCKED_PORTS: &[u16] = &[
        1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 69, 77, 79, 87, 95, 101,
        102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 137, 139, 143, 161, 179, 389,
        427, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 548, 554, 556, 563, 587, 601, 636,
        993, 995, 1719, 1720, 1723, 2049, 3659, 4045, 5060, 5061, 6000, 6566, 6665, 6666, 6667,
        6668, 6669, 6697, 10080,
    ];

    BLOCKED_PORTS.binary_search(&port).is_ok()
}
|
||
|
||
// TODO : Investigate and need to revisit again
|
||
pub fn is_form_submission_request(request: &Request) -> bool {
|
||
let content_type = request.headers.typed_get::<ContentType>();
|
||
content_type.is_some_and(|ct| {
|
||
let mime: Mime = ct.into();
|
||
mime.type_() == mime::APPLICATION && mime.subtype() == mime::WWW_FORM_URLENCODED
|
||
})
|
||
}
|
||
|
||
/// <https://w3c.github.io/webappsec-upgrade-insecure-requests/#upgrade-request>
///
/// Returns whether the request should be upgraded to a potentially trustworthy
/// scheme. As a side effect, may append the `Upgrade-Insecure-Requests: 1`
/// header to navigation requests.
fn should_upgrade_request_to_potentially_trustworthy(
    request: &mut Request,
    context: &FetchContext,
) -> bool {
    // Substeps for navigation requests: whether to continue with the upgrade.
    fn should_upgrade_navigation_request(request: &Request) -> bool {
        // Step 2.1 If request is a form submission, skip the remaining substeps, and continue upgrading request.
        if is_form_submission_request(request) {
            return true;
        }

        // Step 2.2 If request’s client's target browsing context is a nested browsing context,
        // skip the remaining substeps and continue upgrading request.
        if request
            .client
            .as_ref()
            .is_some_and(|client| client.is_nested_browsing_context)
        {
            return true;
        }

        // Step 2.4
        // TODO : check for insecure navigation set after its implemention

        // Step 2.5 Return without further modifying request
        false
    }

    // Step 1. If request is a navigation request,
    if request.is_navigation_request() {
        // Append a header named Upgrade-Insecure-Requests with a value of 1 to
        // request’s header list if any of the following criteria are met:
        // * request’s URL is not a potentially trustworthy URL
        // * request’s URL's host is not a preloadable HSTS host
        // NOTE(review): the second criterion as implemented is true when
        // `is_host_secure` returns true — i.e. when the host IS in the HSTS
        // list — which appears inverted relative to the comment above; confirm
        // the intended polarity against the spec.
        if !is_url_potentially_trustworthy(&context.protocols, &request.current_url()) ||
            request
                .current_url()
                .host_str()
                .is_none_or(|host| context.state.hsts_list.read().is_host_secure(host))
        {
            debug!("Appending the Upgrade-Insecure-Requests header to request’s header list");
            request
                .headers
                .insert("Upgrade-Insecure-Requests", HeaderValue::from_static("1"));
        }

        if !should_upgrade_navigation_request(request) {
            return false;
        }
    }

    // Step 3. Let upgrade state be the result of executing
    // §4.2 Should insecure requests be upgraded for client? upon request's client.
    // Step 4. If upgrade state is "Do Not Upgrade", return without modifying request.
    request
        .client
        .as_ref()
        .is_some_and(|client| client.insecure_requests_policy == InsecureRequestsPolicy::Upgrade)
}
|
||
|
||
/// Result of the "categorize settings object" check
/// (<https://w3c.github.io/webappsec-mixed-content/#categorize-settings-object>).
//
// A two-variant unit enum; derive `Clone`/`Copy`/`Eq` eagerly so values can be
// passed around and compared freely (callers only ever compare with `==`).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MixedSecurityProhibited {
    /// The settings object prohibits ("restricts") mixed security contexts.
    Prohibited,
    /// The settings object does not restrict mixed security contexts.
    NotProhibited,
}
|
||
|
||
/// <https://w3c.github.io/webappsec-mixed-content/#categorize-settings-object>
|
||
fn do_settings_prohibit_mixed_security_contexts(request: &Request) -> MixedSecurityProhibited {
|
||
if let Origin::Origin(ref origin) = request.origin {
|
||
// Step 1. If settings’ origin is a potentially trustworthy origin,
|
||
// then return "Prohibits Mixed Security Contexts".
|
||
// NOTE: Workers created from a data: url are secure if they were created from secure contexts
|
||
if origin.is_potentially_trustworthy() || origin.is_for_data_worker_from_secure_context() {
|
||
return MixedSecurityProhibited::Prohibited;
|
||
}
|
||
}
|
||
|
||
// Step 2.2. For each navigable navigable in document’s ancestor navigables:
|
||
// Step 2.2.1. If navigable’s active document's origin is a potentially trustworthy origin,
|
||
// then return "Prohibits Mixed Security Contexts".
|
||
if request.has_trustworthy_ancestor_origin {
|
||
return MixedSecurityProhibited::Prohibited;
|
||
}
|
||
|
||
MixedSecurityProhibited::NotProhibited
|
||
}
|
||
|
||
/// <https://w3c.github.io/webappsec-mixed-content/#upgrade-algorithm>
|
||
fn should_upgrade_mixed_content_request(
|
||
request: &Request,
|
||
protocol_registry: &ProtocolRegistry,
|
||
) -> bool {
|
||
let url = request.url();
|
||
// Step 1.1 : request’s URL is a potentially trustworthy URL.
|
||
if is_url_potentially_trustworthy(protocol_registry, &url) {
|
||
return false;
|
||
}
|
||
|
||
// Step 1.2 : request’s URL’s host is an IP address.
|
||
match url.host() {
|
||
Some(Host::Ipv4(_)) | Some(Host::Ipv6(_)) => return false,
|
||
_ => (),
|
||
}
|
||
|
||
// Step 1.3
|
||
if do_settings_prohibit_mixed_security_contexts(request) ==
|
||
MixedSecurityProhibited::NotProhibited
|
||
{
|
||
return false;
|
||
}
|
||
|
||
// Step 1.4 : request’s destination is not "image", "audio", or "video".
|
||
if !matches!(
|
||
request.destination,
|
||
Destination::Audio | Destination::Image | Destination::Video
|
||
) {
|
||
return false;
|
||
}
|
||
|
||
// Step 1.5 : request’s destination is "image" and request’s initiator is "imageset".
|
||
if request.destination == Destination::Image && request.initiator == Initiator::ImageSet {
|
||
return false;
|
||
}
|
||
|
||
true
|
||
}
|