mirror of
https://github.com/servo/servo
synced 2026-05-11 01:22:19 +02:00
#41857 exposed some more code in the net crate that performed blocking communication with the embedder and could prevent other networking tasks from running. To address that, we need the net code to perform async receive operations, which requires passing tokio channels to the embedder. However, the current embedding message design puts all messages in the same enum and requires that they are serializable. Embedder messages from the network do not require this property, since they run in the same process as the embedder. Therefore, this PR creates a new EmbedderProxy structure that is generic over the message, allowing each component of Servo to use an embedder message type that is specific to that component. The final benefit of this set of changes is that the embedder messages for a particular crate can now live in the crate itself (since the only crate that depends on it is the servo crate), not in the shared `embedding_traits` crate. This hugely reduces the amount of code that needs to be rebuilt when changing these messages, enabling much faster incremental builds for those changes. Testing: Strictly refactoring; existing test coverage is sufficient. Fixes: #41958 --------- Signed-off-by: Josh Matthews <josh@joshmatthews.net>
949 lines
34 KiB
Rust
949 lines
34 KiB
Rust
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
|
|
|
|
use std::fs::File;
|
|
use std::io::{BufRead, BufReader, Seek, SeekFrom};
|
|
use std::ops::Index;
|
|
use std::path::{Path, PathBuf};
|
|
use std::sync::Arc;
|
|
use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
|
|
|
|
use embedder_traits::{
|
|
EmbedderControlId, EmbedderControlResponse, FilePickerRequest, GenericEmbedderProxy,
|
|
SelectedFile,
|
|
};
|
|
use headers::{ContentLength, ContentRange, ContentType, HeaderMap, HeaderMapExt, Range};
|
|
use http::header::{self, HeaderValue};
|
|
use ipc_channel::ipc::IpcSender;
|
|
use log::warn;
|
|
use mime::{self, Mime};
|
|
use net_traits::blob_url_store::{BlobBuf, BlobURLStoreError};
|
|
use net_traits::filemanager_thread::{
|
|
FileManagerResult, FileManagerThreadError, FileManagerThreadMsg, FileTokenCheck,
|
|
ReadFileProgress, RelativePos,
|
|
};
|
|
use net_traits::http_percent_encode;
|
|
use net_traits::response::{Response, ResponseBody};
|
|
use parking_lot::{Mutex, RwLock};
|
|
use rustc_hash::{FxHashMap, FxHashSet};
|
|
use servo_arc::Arc as ServoArc;
|
|
use servo_url::ImmutableOrigin;
|
|
use tokio::io::{AsyncReadExt, AsyncSeekExt};
|
|
use tokio::sync::mpsc::UnboundedSender as TokioSender;
|
|
use tokio::task::yield_now;
|
|
use uuid::Uuid;
|
|
|
|
use crate::async_runtime::spawn_task;
|
|
use crate::embedder::NetToEmbedderMsg;
|
|
use crate::fetch::methods::{CancellationListener, Data, RangeRequestBounds};
|
|
use crate::protocols::get_range_request_bounds;
|
|
|
|
/// Size of each chunk used when streaming file contents to readers (32 KiB).
pub const FILE_CHUNK_SIZE: usize = 32768; // 32 KB
|
|
|
|
/// FileManagerStore's entry
///
/// One registered file or in-memory blob, together with the bookkeeping
/// (reference count, blob-URL validity, outstanding fetch tokens) that
/// determines when the entry may be removed from the store.
struct FileStoreEntry {
    /// Origin of the entry's "creator"
    origin: ImmutableOrigin,
    /// Backend implementation
    file_impl: FileImpl,
    /// Number of FileID holders that the ID is used to
    /// index this entry in `FileManagerStore`.
    /// Reference holders include a FileStoreEntry or
    /// a script-side File-based Blob
    refs: AtomicUsize,
    /// UUIDs only become valid blob URIs when explicitly requested
    /// by the user with createObjectURL. Validity can be revoked as well.
    /// (The UUID is the one that maps to this entry in `FileManagerStore`)
    is_valid_url: AtomicBool,
    /// UUIDs of fetch instances that acquired an interest in this file,
    /// when the url was still valid.
    outstanding_tokens: FxHashSet<Uuid>,
}
|
|
|
|
/// Metadata describing an on-disk file that backs a store entry.
#[derive(Clone)]
struct FileMetaData {
    /// Path of the backing file on disk.
    path: PathBuf,
    /// Size of the file in bytes (recorded when the entry was created).
    size: u64,
}
|
|
|
|
/// File backend implementation
///
/// A store entry's contents can live on disk, in memory, or be a slice
/// of another entry's data.
#[derive(Clone)]
enum FileImpl {
    /// Metadata of on-disk file
    MetaDataOnly(FileMetaData),
    /// In-memory Blob buffer object
    Memory(BlobBuf),
    /// A reference to parent entry in `FileManagerStore`,
    /// representing a sliced version of the parent entry data
    Sliced(Uuid, RelativePos),
}
|
|
|
|
/// Cloneable front-end of the file manager: routes file manager thread
/// messages to the shared [`FileManagerStore`] and talks to the embedder
/// (e.g. to open a file picker) through the embedder proxy.
#[derive(Clone)]
pub struct FileManager {
    /// Proxy used to send net-specific messages to the embedder.
    embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>,
    /// Shared store of registered file/blob entries.
    store: Arc<FileManagerStore>,
}
|
|
|
|
impl FileManager {
    /// Create a new file manager with an empty store, holding on to the
    /// given embedder proxy for file-picker requests.
    pub fn new(embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>) -> FileManager {
        FileManager {
            embedder_proxy,
            store: Arc::new(FileManagerStore::new()),
        }
    }

    /// Asynchronously read the file identified by `id`, streaming progress
    /// over `sender`. Any store error is reported back on the same channel.
    fn read_file(
        &self,
        sender: IpcSender<FileManagerResult<ReadFileProgress>>,
        id: Uuid,
        origin: ImmutableOrigin,
    ) {
        let store = self.store.clone();
        spawn_task(async move {
            if let Err(e) = store.try_read_file(&sender, id, origin).await {
                let _ = sender.send(Err(FileManagerThreadError::BlobURLStoreError(e)));
            }
        });
    }

    /// Acquire a fetch token for `file_id` (see
    /// [`FileManagerStore::get_token_for_file`]).
    pub(crate) fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
        self.store.get_token_for_file(file_id)
    }

    /// Release a previously acquired fetch token, possibly removing the
    /// entry if nothing else references it.
    pub(crate) fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
        self.store.invalidate_token(token, file_id);
    }

    /// Read a file for the Fetch implementation.
    /// It gets the required headers synchronously and reads the actual content
    /// in a separate thread.
    #[expect(clippy::too_many_arguments)]
    pub(crate) fn fetch_file(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: Uuid,
        file_token: &FileTokenCheck,
        origin: ImmutableOrigin,
        response: &mut Response,
        range: Option<Range>,
    ) -> Result<(), BlobURLStoreError> {
        // The optional Range header still needs to be resolved against the
        // blob's actual size, hence BlobBounds::Unresolved.
        self.fetch_blob_buf(
            done_sender,
            cancellation_listener,
            &id,
            file_token,
            &origin,
            BlobBounds::Unresolved(range),
            response,
        )
    }

    /// Register an in-memory blob buffer under `id` (see
    /// [`FileManagerStore::promote_memory`]).
    pub fn promote_memory(
        &self,
        id: Uuid,
        blob_buf: BlobBuf,
        set_valid: bool,
        origin: ImmutableOrigin,
    ) {
        self.store.promote_memory(id, blob_buf, set_valid, origin);
    }

    /// Message handler
    ///
    /// Routes each `FileManagerThreadMsg` to the corresponding store
    /// operation; file selection is dispatched onto an async task since it
    /// awaits a response from the embedder.
    pub fn handle(&self, msg: FileManagerThreadMsg) {
        match msg {
            FileManagerThreadMsg::SelectFiles(control_id, file_picker_request, response_sender) => {
                let store = self.store.clone();
                let embedder = self.embedder_proxy.clone();
                spawn_task(async move {
                    let embedder_control_msg = store
                        .select_files(control_id, file_picker_request, embedder)
                        .await;
                    response_sender.send(embedder_control_msg).unwrap();
                });
            },
            FileManagerThreadMsg::ReadFile(sender, id, origin) => {
                self.read_file(sender, id, origin);
            },
            FileManagerThreadMsg::PromoteMemory(id, blob_buf, set_valid, origin) => {
                self.promote_memory(id, blob_buf, set_valid, origin);
            },
            FileManagerThreadMsg::AddSlicedURLEntry(id, rel_pos, sender, origin) => {
                self.store.add_sliced_url_entry(id, rel_pos, sender, origin);
            },
            FileManagerThreadMsg::DecRef(id, origin, sender) => {
                let _ = sender.send(self.store.dec_ref(&id, &origin));
            },
            FileManagerThreadMsg::RevokeBlobURL(id, origin, sender) => {
                let _ = sender.send(self.store.set_blob_url_validity(false, &id, &origin));
            },
            FileManagerThreadMsg::ActivateBlobURL(id, sender, origin) => {
                let _ = sender.send(self.store.set_blob_url_validity(true, &id, &origin));
            },
        }
    }

    /// Stream the contents of `reader` into `res_body` and over `done_sender`
    /// in chunks, on a spawned async task, honoring cancellation and an
    /// optional end bound in `range`. The reader is expected to already be
    /// positioned at the start of the requested range.
    pub fn fetch_file_in_chunks(
        &self,
        done_sender: &mut TokioSender<Data>,
        mut reader: BufReader<File>,
        res_body: ServoArc<Mutex<ResponseBody>>,
        cancellation_listener: Arc<CancellationListener>,
        range: RelativePos,
    ) {
        let done_sender = done_sender.clone();
        spawn_task(async move {
            loop {
                if cancellation_listener.cancelled() {
                    *res_body.lock() = ResponseBody::Done(vec![]);
                    let _ = done_sender.send(Data::Cancelled);
                    return;
                }
                // Number of bytes to consume from the reader this iteration;
                // 0 signals that the transfer is complete.
                let length = {
                    // NOTE(review): `fill_buf().unwrap()` panics the task on a
                    // read error instead of reporting it — confirm intended.
                    let buffer = reader.fill_buf().unwrap().to_vec();
                    let mut buffer_len = buffer.len();
                    if let ResponseBody::Receiving(ref mut body) = *res_body.lock() {
                        let offset = usize::min(
                            {
                                if let Some(end) = range.end {
                                    // HTTP Range requests are specified with closed ranges,
                                    // while Rust uses half-open ranges. We add +1 here so
                                    // we don't skip the last requested byte.
                                    let remaining_bytes =
                                        end as usize - range.start as usize - body.len() + 1;
                                    if remaining_bytes <= FILE_CHUNK_SIZE {
                                        // This is the last chunk so we set buffer
                                        // len to 0 to break the reading loop.
                                        buffer_len = 0;
                                        remaining_bytes
                                    } else {
                                        FILE_CHUNK_SIZE
                                    }
                                } else {
                                    FILE_CHUNK_SIZE
                                }
                            },
                            buffer.len(),
                        );
                        let chunk = &buffer[0..offset];
                        body.extend_from_slice(chunk);
                        let _ = done_sender.send(Data::Payload(chunk.to_vec()));
                    }
                    buffer_len
                };
                if length == 0 {
                    // EOF or end of range reached: finalize the body and
                    // notify the consumer.
                    let mut body = res_body.lock();
                    let completed_body = match *body {
                        ResponseBody::Receiving(ref mut body) => std::mem::take(body),
                        _ => vec![],
                    };
                    *body = ResponseBody::Done(completed_body);
                    let _ = done_sender.send(Data::Done);
                    break;
                }
                reader.consume(length);
                // Let other tasks on the runtime make progress between chunks.
                yield_now().await
            }
        });
    }

    /// Resolve the entry for `id` and stream its bytes into `response`:
    /// in-memory blobs are sent directly, on-disk files are streamed via
    /// [`Self::fetch_file_in_chunks`], and sliced entries recurse into their
    /// parent with pre-resolved bounds.
    #[expect(clippy::too_many_arguments)]
    fn fetch_blob_buf(
        &self,
        done_sender: &mut TokioSender<Data>,
        cancellation_listener: Arc<CancellationListener>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
        bounds: BlobBounds,
        response: &mut Response,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.store.get_impl(id, file_token, origin_in)?;
        /*
        Only Fetch Blob Range Request would have unresolved range, and only in that case we care about range header.
        */
        let mut is_range_requested = false;
        match file_impl {
            FileImpl::Memory(buf) => {
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, buf.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(buf.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                let range = range.to_abs_blob_range(buf.size as usize);
                let len = range.len() as u64;
                // Only emit Content-Range when the caller actually sent a
                // Range header.
                let content_range = if is_range_requested {
                    ContentRange::bytes(range.start as u64..range.end as u64, buf.size).ok()
                } else {
                    None
                };

                set_headers(
                    &mut response.headers,
                    len,
                    buf.type_string.parse().unwrap_or(mime::TEXT_PLAIN),
                    /* filename */ None,
                    content_range,
                );

                let mut bytes = vec![];
                bytes.extend_from_slice(buf.bytes.index(range));

                let _ = done_sender.send(Data::Payload(bytes));
                let _ = done_sender.send(Data::Done);

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                Concretely, here we create another file, and this file might not
                has the same underlying file state (meta-info plus content) as the time
                create_entry is called.
                */

                let file = File::open(&metadata.path)
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let mut is_range_requested = false;
                let bounds = match bounds {
                    BlobBounds::Unresolved(range) => {
                        if range.is_some() {
                            is_range_requested = true;
                        }
                        get_range_request_bounds(range, metadata.size)
                    },
                    BlobBounds::Resolved(bounds) => bounds,
                };
                let range = bounds
                    .get_final(Some(metadata.size))
                    .map_err(|_| BlobURLStoreError::InvalidRange)?;

                // Position the reader at the start of the requested range
                // before handing it to the chunked streamer.
                let mut reader = BufReader::with_capacity(FILE_CHUNK_SIZE, file);
                if reader.seek(SeekFrom::Start(range.start as u64)).is_err() {
                    return Err(BlobURLStoreError::External(
                        "Unexpected method for blob".into(),
                    ));
                }

                let filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let content_range = if is_range_requested {
                    let abs_range = range.to_abs_blob_range(metadata.size as usize);
                    ContentRange::bytes(abs_range.start as u64..abs_range.end as u64, metadata.size)
                        .ok()
                } else {
                    None
                };
                set_headers(
                    &mut response.headers,
                    metadata.size,
                    mime_guess::from_path(metadata.path)
                        .first()
                        .unwrap_or(mime::TEXT_PLAIN),
                    filename,
                    content_range,
                );

                self.fetch_file_in_chunks(
                    &mut done_sender.clone(),
                    reader,
                    response.body.clone(),
                    cancellation_listener,
                    range,
                );

                Ok(())
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // Next time we don't need to check validity since
                // we have already done that for requesting URL if necessary.
                let bounds = RangeRequestBounds::Final(
                    RelativePos::full_range().slice_inner(&inner_rel_pos),
                );
                self.fetch_blob_buf(
                    done_sender,
                    cancellation_listener,
                    &parent_id,
                    file_token,
                    origin_in,
                    BlobBounds::Resolved(bounds),
                    response,
                )
            },
        }
    }
}
|
|
|
|
/// Byte bounds of a blob fetch request.
enum BlobBounds {
    /// Bounds derived from an optional HTTP `Range` header that still need
    /// to be resolved against the blob's size.
    Unresolved(Option<Range>),
    /// Bounds that have already been resolved to concrete positions.
    Resolved(RangeRequestBounds),
}
|
|
|
|
/// File manager's data store. It maintains a thread-safe mapping
/// from FileID to FileStoreEntry which might have different backend implementation.
/// Access to the content is encapsulated as methods of this struct.
struct FileManagerStore {
    /// All registered entries, keyed by their file UUID.
    entries: RwLock<FxHashMap<Uuid, FileStoreEntry>>,
}
|
|
|
|
impl FileManagerStore {
    /// Create an empty store.
    fn new() -> Self {
        FileManagerStore {
            entries: RwLock::new(FxHashMap::default()),
        }
    }

    /// Copy out the file backend implementation content
    ///
    /// Fails when the id is unknown, the origin does not match the entry's
    /// origin, or the supplied token is not an outstanding token for the
    /// entry.
    fn get_impl(
        &self,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
    ) -> Result<FileImpl, BlobURLStoreError> {
        match self.entries.read().get(id) {
            Some(entry) => {
                if *origin_in != entry.origin {
                    Err(BlobURLStoreError::InvalidOrigin)
                } else {
                    match file_token {
                        FileTokenCheck::NotRequired => Ok(entry.file_impl.clone()),
                        FileTokenCheck::Required(token) => {
                            if entry.outstanding_tokens.contains(token) {
                                return Ok(entry.file_impl.clone());
                            }
                            Err(BlobURLStoreError::InvalidFileID)
                        },
                        FileTokenCheck::ShouldFail => Err(BlobURLStoreError::InvalidFileID),
                    }
                }
            },
            None => Err(BlobURLStoreError::InvalidFileID),
        }
    }

    /// Release an outstanding fetch token for `file_id`, and remove the
    /// entry entirely when no references, tokens, or valid blob URLs remain.
    fn invalidate_token(&self, token: &FileTokenCheck, file_id: &Uuid) {
        if let FileTokenCheck::Required(token) = token {
            let mut entries = self.entries.write();
            if let Some(entry) = entries.get_mut(file_id) {
                entry.outstanding_tokens.remove(token);

                // Check if there are references left.
                let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                // Check if no other fetch has acquired a token for this file.
                let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                // Check if there is still a blob URL outstanding.
                let valid = entry.is_valid_url.load(Ordering::Acquire);

                // Can we remove this file?
                let do_remove = zero_refs && no_outstanding_tokens && !valid;

                if do_remove {
                    entries.remove(file_id);
                }
            }
        }
    }

    /// Acquire a fetch token for `file_id`. For sliced entries the token is
    /// registered on the parent entry (whose data will actually be read).
    /// Returns `ShouldFail` for unknown ids or entries whose blob URL is no
    /// longer valid.
    pub(crate) fn get_token_for_file(&self, file_id: &Uuid) -> FileTokenCheck {
        let mut entries = self.entries.write();
        let parent_id = match entries.get(file_id) {
            Some(entry) => {
                if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                    Some(*parent_id)
                } else {
                    None
                }
            },
            None => return FileTokenCheck::ShouldFail,
        };
        // Token is taken on the parent for slices, on the entry itself
        // otherwise.
        let file_id = match parent_id.as_ref() {
            Some(id) => id,
            None => file_id,
        };
        if let Some(entry) = entries.get_mut(file_id) {
            if !entry.is_valid_url.load(Ordering::Acquire) {
                return FileTokenCheck::ShouldFail;
            }
            let token = Uuid::new_v4();
            entry.outstanding_tokens.insert(token);
            return FileTokenCheck::Required(token);
        }
        FileTokenCheck::ShouldFail
    }

    /// Insert (or replace) the entry stored under `id`.
    fn insert(&self, id: Uuid, entry: FileStoreEntry) {
        self.entries.write().insert(id, entry);
    }

    /// Remove the entry stored under `id`, if any.
    fn remove(&self, id: &Uuid) {
        self.entries.write().remove(id);
    }

    /// Increment the reference count of the entry for `id`, checking that
    /// the caller's origin matches the entry's origin.
    fn inc_ref(&self, id: &Uuid, origin_in: &ImmutableOrigin) -> Result<(), BlobURLStoreError> {
        match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    entry.refs.fetch_add(1, Ordering::Relaxed);
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidOrigin)
                }
            },
            None => Err(BlobURLStoreError::InvalidFileID),
        }
    }

    /// Create a new entry that is a slice (`rel_pos`) of `parent_id`'s data,
    /// bumping the parent's refcount, and reply with the new entry's id.
    fn add_sliced_url_entry(
        &self,
        parent_id: Uuid,
        rel_pos: RelativePos,
        sender: IpcSender<Result<Uuid, BlobURLStoreError>>,
        origin_in: ImmutableOrigin,
    ) {
        match self.inc_ref(&parent_id, &origin_in) {
            Ok(_) => {
                let new_id = Uuid::new_v4();
                self.insert(
                    new_id,
                    FileStoreEntry {
                        origin: origin_in,
                        file_impl: FileImpl::Sliced(parent_id, rel_pos),
                        refs: AtomicUsize::new(1),
                        // Valid here since AddSlicedURLEntry implies URL creation
                        // from a BlobImpl::Sliced
                        is_valid_url: AtomicBool::new(true),
                        outstanding_tokens: Default::default(),
                    },
                );

                // We assume that the returned id will be held by BlobImpl::File
                let _ = sender.send(Ok(new_id));
            },
            Err(e) => {
                let _ = sender.send(Err(e));
            },
        }
    }

    /// Ask the embedder to show a file picker, then register an entry for
    /// each selected file. Returns `FilePicker(None)` when the user cancels
    /// or the embedder channel fails; per WebDriver semantics, if any file
    /// could not be registered, all created entries are rolled back and an
    /// empty selection is returned.
    async fn select_files(
        &self,
        control_id: EmbedderControlId,
        file_picker_request: FilePickerRequest,
        embedder_proxy: GenericEmbedderProxy<NetToEmbedderMsg>,
    ) -> EmbedderControlResponse {
        let (sender, receiver) = tokio::sync::oneshot::channel();

        let origin = file_picker_request.origin.clone();
        embedder_proxy.send(NetToEmbedderMsg::SelectFiles(
            control_id,
            file_picker_request,
            sender,
        ));

        let paths = match receiver.await {
            Ok(Some(result)) => result,
            Ok(None) => {
                return EmbedderControlResponse::FilePicker(None);
            },
            Err(error) => {
                warn!("Failed to receive files from embedder ({:?}).", error);
                return EmbedderControlResponse::FilePicker(None);
            },
        };

        let mut failed = false;
        let files: Vec<_> = paths
            .into_iter()
            .filter_map(|path| match self.create_entry(&path, origin.clone()) {
                Ok(entry) => Some(entry),
                Err(error) => {
                    failed = true;
                    warn!("Failed to create entry for selected file: {error:?}");
                    None
                },
            })
            .collect();

        // From <https://w3c.github.io/webdriver/#dfn-element-send-keys>:
        //
        // > Step 8.5: Verify that each file given by the user exists. If any do not,
        // > return error with error code invalid argument.
        //
        // WebDriver expects that if any of the files isn't found we don't select any files.
        if failed {
            for file in files.iter() {
                self.remove(&file.id);
            }
            return EmbedderControlResponse::FilePicker(Some(Vec::new()));
        }

        EmbedderControlResponse::FilePicker(Some(files))
    }

    /// Register a metadata-only store entry for the file at `file_path` and
    /// return the `SelectedFile` description (id, filename, mtime, size and
    /// guessed MIME type) that script will receive.
    fn create_entry(
        &self,
        file_path: &Path,
        origin: ImmutableOrigin,
    ) -> Result<SelectedFile, FileManagerThreadError> {
        use net_traits::filemanager_thread::FileManagerThreadError::FileSystemError;

        let file = File::open(file_path).map_err(|e| FileSystemError(e.to_string()))?;
        let metadata = file
            .metadata()
            .map_err(|e| FileSystemError(e.to_string()))?;
        let modified = metadata
            .modified()
            .map_err(|e| FileSystemError(e.to_string()))?;
        let file_size = metadata.len();
        let file_name = file_path
            .file_name()
            .ok_or(FileSystemError("Invalid filepath".to_string()))?;

        let file_impl = FileImpl::MetaDataOnly(FileMetaData {
            path: file_path.to_path_buf(),
            size: file_size,
        });

        let id = Uuid::new_v4();

        self.insert(
            id,
            FileStoreEntry {
                origin,
                file_impl,
                refs: AtomicUsize::new(1),
                // Invalid here since create_entry is called by file selection
                is_valid_url: AtomicBool::new(false),
                outstanding_tokens: Default::default(),
            },
        );

        let filename_path = Path::new(file_name);
        let type_string = match mime_guess::from_path(filename_path).first() {
            Some(x) => format!("{}", x),
            None => "".to_string(),
        };

        Ok(SelectedFile {
            id,
            filename: filename_path.to_path_buf(),
            modified,
            size: file_size,
            type_string,
        })
    }

    /// Stream the bytes of entry `id` restricted to `rel_pos` over `sender`
    /// as `ReadFileProgress` messages. Sliced entries recurse into their
    /// parent (boxed, since async recursion needs a pinned future).
    async fn get_blob_buf(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: &Uuid,
        file_token: &FileTokenCheck,
        origin_in: &ImmutableOrigin,
        rel_pos: RelativePos,
    ) -> Result<(), BlobURLStoreError> {
        let file_impl = self.get_impl(id, file_token, origin_in)?;
        match file_impl {
            FileImpl::Memory(buf) => {
                let range = rel_pos.to_abs_range(buf.size as usize);
                let buf = BlobBuf {
                    filename: None,
                    type_string: buf.type_string,
                    size: range.len() as u64,
                    bytes: buf.bytes.index(range).to_vec(),
                };

                let _ = sender.send(Ok(ReadFileProgress::Meta(buf)));
                let _ = sender.send(Ok(ReadFileProgress::EOF));

                Ok(())
            },
            FileImpl::MetaDataOnly(metadata) => {
                /* XXX: Snapshot state check (optional) https://w3c.github.io/FileAPI/#snapshot-state.
                Concretely, here we create another file, and this file might not
                has the same underlying file state (meta-info plus content) as the time
                create_entry is called.
                */

                let opt_filename = metadata
                    .path
                    .file_name()
                    .and_then(|osstr| osstr.to_str())
                    .map(|s| s.to_string());

                let mime = mime_guess::from_path(metadata.path.clone()).first();
                let range = rel_pos.to_abs_range(metadata.size as usize);

                let mut file = tokio::fs::File::open(&metadata.path)
                    .await
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;
                let seeked_start = file
                    .seek(SeekFrom::Start(range.start as u64))
                    .await
                    .map_err(|e| BlobURLStoreError::External(e.to_string()))?;

                // If the seek landed elsewhere the requested range cannot be
                // honored.
                if seeked_start == (range.start as u64) {
                    let type_string = match mime {
                        Some(x) => format!("{}", x),
                        None => "".to_string(),
                    };

                    read_file_in_chunks(sender, file, range.len(), opt_filename, type_string).await;
                    Ok(())
                } else {
                    Err(BlobURLStoreError::InvalidEntry)
                }
            },
            FileImpl::Sliced(parent_id, inner_rel_pos) => {
                // Next time we don't need to check validity since
                // we have already done that for requesting URL if necessary
                Box::pin(self.get_blob_buf(
                    sender,
                    &parent_id,
                    file_token,
                    origin_in,
                    rel_pos.slice_inner(&inner_rel_pos),
                ))
                .await
            },
        }
    }

    // Convenient wrapper over get_blob_buf
    async fn try_read_file(
        &self,
        sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
        id: Uuid,
        origin_in: ImmutableOrigin,
    ) -> Result<(), BlobURLStoreError> {
        self.get_blob_buf(
            sender,
            &id,
            &FileTokenCheck::NotRequired,
            &origin_in,
            RelativePos::full_range(),
        )
        .await
    }

    /// Decrement the reference count of entry `id`. When the last reference
    /// is dropped and the entry has neither a valid blob URL nor outstanding
    /// fetch tokens, the entry is removed and (for slices) the parent's
    /// refcount is decremented in turn.
    fn dec_ref(&self, id: &Uuid, origin_in: &ImmutableOrigin) -> Result<(), BlobURLStoreError> {
        let (do_remove, opt_parent_id) = match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    let old_refs = entry.refs.fetch_sub(1, Ordering::Release);

                    if old_refs > 1 {
                        // not the last reference, no need to touch parent
                        (false, None)
                    } else {
                        // last reference, and if it has a reference to parent id
                        // dec_ref on parent later if necessary
                        let is_valid = entry.is_valid_url.load(Ordering::Acquire);

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = !is_valid && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id))
                        } else {
                            (do_remove, None)
                        }
                    }
                } else {
                    return Err(BlobURLStoreError::InvalidOrigin);
                }
            },
            None => return Err(BlobURLStoreError::InvalidFileID),
        };

        // Trigger removing if its last reference is gone and it is
        // not a part of a valid Blob URL
        if do_remove {
            // Acquire fence pairs with the Release ordering of the
            // fetch_sub above before the entry is torn down.
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }

        Ok(())
    }

    /// Register an in-memory blob buffer under `id` with a refcount of 1;
    /// `set_valid` controls whether the id is immediately usable as a blob
    /// URL.
    fn promote_memory(
        &self,
        id: Uuid,
        blob_buf: BlobBuf,
        set_valid: bool,
        origin: ImmutableOrigin,
    ) {
        self.insert(
            id,
            FileStoreEntry {
                origin,
                file_impl: FileImpl::Memory(blob_buf),
                refs: AtomicUsize::new(1),
                is_valid_url: AtomicBool::new(set_valid),
                outstanding_tokens: Default::default(),
            },
        );
    }

    /// Set whether `id` is a valid blob URL (activate/revoke). Revoking may
    /// remove the entry when it holds no references or tokens, cascading a
    /// `dec_ref` to the parent for sliced entries.
    fn set_blob_url_validity(
        &self,
        validity: bool,
        id: &Uuid,
        origin_in: &ImmutableOrigin,
    ) -> Result<(), BlobURLStoreError> {
        let (do_remove, opt_parent_id, res) = match self.entries.read().get(id) {
            Some(entry) => {
                if entry.origin == *origin_in {
                    entry.is_valid_url.store(validity, Ordering::Release);

                    if !validity {
                        // Check if it is the last possible reference
                        // since refs only accounts for blob id holders
                        // and store entry id holders
                        let zero_refs = entry.refs.load(Ordering::Acquire) == 0;

                        // Check if no fetch has acquired a token for this file.
                        let no_outstanding_tokens = entry.outstanding_tokens.is_empty();

                        // Can we remove this file?
                        let do_remove = zero_refs && no_outstanding_tokens;

                        if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
                            (do_remove, Some(*parent_id), Ok(()))
                        } else {
                            (do_remove, None, Ok(()))
                        }
                    } else {
                        (false, None, Ok(()))
                    }
                } else {
                    (false, None, Err(BlobURLStoreError::InvalidOrigin))
                }
            },
            None => (false, None, Err(BlobURLStoreError::InvalidFileID)),
        };

        if do_remove {
            atomic::fence(Ordering::Acquire);
            self.remove(id);

            if let Some(parent_id) = opt_parent_id {
                return self.dec_ref(&parent_id, origin_in);
            }
        }
        res
    }
}
|
|
|
|
async fn read_file_in_chunks(
|
|
sender: &IpcSender<FileManagerResult<ReadFileProgress>>,
|
|
mut file: tokio::fs::File,
|
|
size: usize,
|
|
opt_filename: Option<String>,
|
|
type_string: String,
|
|
) {
|
|
// First chunk
|
|
let mut buf = vec![0; FILE_CHUNK_SIZE];
|
|
match file.read(&mut buf).await {
|
|
Ok(n) => {
|
|
buf.truncate(n);
|
|
let blob_buf = BlobBuf {
|
|
filename: opt_filename,
|
|
type_string,
|
|
size: size as u64,
|
|
bytes: buf,
|
|
};
|
|
let _ = sender.send(Ok(ReadFileProgress::Meta(blob_buf)));
|
|
},
|
|
Err(e) => {
|
|
let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
|
|
return;
|
|
},
|
|
}
|
|
|
|
// Send the remaining chunks
|
|
loop {
|
|
let mut buf = vec![0; FILE_CHUNK_SIZE];
|
|
match file.read(&mut buf).await {
|
|
Ok(0) => {
|
|
let _ = sender.send(Ok(ReadFileProgress::EOF));
|
|
return;
|
|
},
|
|
Ok(n) => {
|
|
buf.truncate(n);
|
|
let _ = sender.send(Ok(ReadFileProgress::Partial(buf)));
|
|
},
|
|
Err(e) => {
|
|
let _ = sender.send(Err(FileManagerThreadError::FileSystemError(e.to_string())));
|
|
return;
|
|
},
|
|
}
|
|
}
|
|
}
|
|
|
|
fn set_headers(
|
|
headers: &mut HeaderMap,
|
|
content_length: u64,
|
|
mime: Mime,
|
|
filename: Option<String>,
|
|
content_range: Option<ContentRange>,
|
|
) {
|
|
headers.typed_insert(ContentLength(content_length));
|
|
if let Some(content_range) = content_range {
|
|
headers.typed_insert(content_range);
|
|
}
|
|
headers.typed_insert(ContentType::from(mime.clone()));
|
|
let name = match filename {
|
|
Some(name) => name,
|
|
None => return,
|
|
};
|
|
let charset = mime.get_param(mime::CHARSET);
|
|
let charset = charset
|
|
.map(|c| c.as_ref().into())
|
|
.unwrap_or("us-ascii".to_owned());
|
|
// TODO(eijebong): Replace this once the typed header is there
|
|
// https://github.com/hyperium/headers/issues/8
|
|
headers.insert(
|
|
header::CONTENT_DISPOSITION,
|
|
HeaderValue::from_bytes(
|
|
format!(
|
|
"inline; {}",
|
|
if charset.to_lowercase() == "utf-8" {
|
|
format!(
|
|
"filename=\"{}\"",
|
|
String::from_utf8(name.as_bytes().into()).unwrap()
|
|
)
|
|
} else {
|
|
format!(
|
|
"filename*=\"{}\"''{}",
|
|
charset,
|
|
http_percent_encode(name.as_bytes())
|
|
)
|
|
}
|
|
)
|
|
.as_bytes(),
|
|
)
|
|
.unwrap(),
|
|
);
|
|
}
|