diff --git a/components/storage/indexeddb/engines/sqlite.rs b/components/storage/indexeddb/engines/sqlite.rs index c2fe5a67858..a7297f93c4b 100644 --- a/components/storage/indexeddb/engines/sqlite.rs +++ b/components/storage/indexeddb/engines/sqlite.rs @@ -18,8 +18,8 @@ use storage_traits::indexeddb_thread::{ }; use tokio::sync::oneshot; +use crate::indexeddb::IndexedDBDescription; use crate::indexeddb::engines::{KvsEngine, KvsTransaction}; -use crate::indexeddb::idb_thread::IndexedDBDescription; use crate::shared::{DB_INIT_PRAGMAS, DB_PRAGMAS}; mod create; diff --git a/components/storage/indexeddb/idb_thread.rs b/components/storage/indexeddb/idb_thread.rs deleted file mode 100644 index 507351d1735..00000000000 --- a/components/storage/indexeddb/idb_thread.rs +++ /dev/null @@ -1,441 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ -use std::borrow::ToOwned; -use std::collections::hash_map::Entry; -use std::collections::{HashMap, VecDeque}; -use std::path::PathBuf; -use std::sync::Arc; -use std::thread; - -use base::threadpool::ThreadPool; -use ipc_channel::ipc::{self, IpcError, IpcReceiver, IpcSender}; -use log::{debug, warn}; -use rustc_hash::FxHashMap; -use servo_config::pref; -use servo_url::origin::ImmutableOrigin; -use storage_traits::indexeddb_thread::{ - AsyncOperation, BackendError, BackendResult, CreateObjectResult, DbResult, IndexedDBThreadMsg, - IndexedDBTxnMode, KeyPath, SyncOperation, -}; -use uuid::Uuid; - -use crate::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SqliteEngine}; - -pub trait IndexedDBThreadFactory { - fn new(config_dir: Option) -> Self; -} - -impl IndexedDBThreadFactory for IpcSender { - fn new(config_dir: Option) -> IpcSender { - let (chan, port) = ipc::channel().unwrap(); - - let mut idb_base_dir = PathBuf::new(); - if let Some(p) = config_dir { - idb_base_dir.push(p); - } - idb_base_dir.push("IndexedDB"); - - thread::Builder::new() - .name("IndexedDBManager".to_owned()) - .spawn(move || { - IndexedDBManager::new(port, idb_base_dir).start(); - }) - .expect("Thread spawning failed"); - - chan - } -} - -#[derive(Clone, Eq, Hash, PartialEq)] -pub struct IndexedDBDescription { - pub origin: ImmutableOrigin, - pub name: String, -} - -impl IndexedDBDescription { - // randomly generated namespace for our purposes - const NAMESPACE_SERVO_IDB: &uuid::Uuid = &Uuid::from_bytes([ - 0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc2, 0xa0, 0xdb, 0xe2, 0x18, 0xc5, 0xc8, 0xa3, - 0x5d, - ]); - // Converts the database description to a folder name where all - // data for this database is stored - pub(super) fn as_path(&self) -> PathBuf { - let mut path = PathBuf::new(); - - // uuid v5 is deterministic - let origin_uuid = Uuid::new_v5( - Self::NAMESPACE_SERVO_IDB, - self.origin.ascii_serialization().as_bytes(), - ); - let db_name_uuid = Uuid::new_v5(Self::NAMESPACE_SERVO_IDB, self.name.as_bytes()); - path.push(origin_uuid.to_string()); - path.push(db_name_uuid.to_string()); - - path - } -} - -struct IndexedDBEnvironment { - engine: E, - transactions: FxHashMap, - serial_number_counter: u64, -} - -impl IndexedDBEnvironment { - fn new(engine: E) -> IndexedDBEnvironment { - IndexedDBEnvironment { - engine, - transactions: FxHashMap::default(), - serial_number_counter: 0, - } - } - - fn queue_operation( - &mut self, - store_name: &str, - serial_number: u64, - mode: IndexedDBTxnMode, - operation: 
AsyncOperation, - ) { - self.transactions - .entry(serial_number) - .or_insert_with(|| KvsTransaction { - requests: VecDeque::new(), - mode, - }) - .requests - .push_back(KvsOperation { - operation, - store_name: String::from(store_name), - }); - } - - // Executes all requests for a transaction (without committing) - fn start_transaction(&mut self, txn: u64, sender: Option>>) { - // FIXME:(arihant2math) find optimizations in this function - // rather than on the engine level code (less repetition) - if let Some(txn) = self.transactions.remove(&txn) { - let _ = self.engine.process_transaction(txn).blocking_recv(); - } - - // We have a sender if the transaction is started manually, and they - // probably want to know when it is finished - if let Some(sender) = sender { - if sender.send(Ok(())).is_err() { - warn!("IDBTransaction starter dropped its channel"); - } - } - } - - fn has_key_generator(&self, store_name: &str) -> bool { - self.engine.has_key_generator(store_name) - } - - fn key_path(&self, store_name: &str) -> Option { - self.engine.key_path(store_name) - } - - fn create_index( - &self, - store_name: &str, - index_name: String, - key_path: KeyPath, - unique: bool, - multi_entry: bool, - ) -> DbResult { - self.engine - .create_index(store_name, index_name, key_path, unique, multi_entry) - .map_err(|err| format!("{err:?}")) - } - - fn delete_index(&self, store_name: &str, index_name: String) -> DbResult<()> { - self.engine - .delete_index(store_name, index_name) - .map_err(|err| format!("{err:?}")) - } - - fn create_object_store( - &mut self, - store_name: &str, - key_path: Option, - auto_increment: bool, - ) -> DbResult { - self.engine - .create_store(store_name, key_path, auto_increment) - .map_err(|err| format!("{err:?}")) - } - - fn delete_object_store(&mut self, store_name: &str) -> DbResult<()> { - let result = self.engine.delete_store(store_name); - result.map_err(|err| format!("{err:?}")) - } - - fn delete_database(self, sender: IpcSender>) { - let result = self.engine.delete_database(); - let _ = sender.send( - result - .map_err(|err| format!("{err:?}")) - .map_err(BackendError::from), - ); - } - - fn version(&self) -> DbResult { - self.engine.version().map_err(|err| format!("{err:?}")) - } - - fn set_version(&mut self, version: u64) -> DbResult<()> { - self.engine - .set_version(version) - .map_err(|err| format!("{err:?}")) - } -} - -struct IndexedDBManager { - port: IpcReceiver, - idb_base_dir: PathBuf, - databases: HashMap>, - thread_pool: Arc, -} - -impl IndexedDBManager { - fn new(port: IpcReceiver, idb_base_dir: PathBuf) -> IndexedDBManager { - debug!("New indexedDBManager"); - - // Uses an estimate of the system cpus to process IndexedDB transactions - // See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html - // If no information can be obtained about the system, uses 4 threads as a default - let thread_count = thread::available_parallelism() - .map(|i| i.get()) - .unwrap_or(pref!(threadpools_fallback_worker_num) as usize) - .min(pref!(threadpools_indexeddb_workers_max).max(1) as usize); - - IndexedDBManager { - port, - idb_base_dir, - databases: HashMap::new(), - thread_pool: Arc::new(ThreadPool::new(thread_count, "IndexedDB".to_string())), - } - } -} - -impl IndexedDBManager { - fn start(&mut self) { - loop { - // FIXME:(arihant2math) No message *most likely* means that - // the ipc sender has been dropped, so we break the look - let message = match self.port.recv() { - Ok(msg) => msg, - Err(e) => match e { - IpcError::Disconnected => { - 
break; - }, - other => { - warn!("Error in IndexedDB thread: {:?}", other); - continue; - }, - }, - }; - match message { - IndexedDBThreadMsg::Sync(operation) => { - self.handle_sync_operation(operation); - }, - IndexedDBThreadMsg::Async(origin, db_name, store_name, txn, mode, operation) => { - if let Some(db) = self.get_database_mut(origin, db_name) { - // Queues an operation for a transaction without starting it - db.queue_operation(&store_name, txn, mode, operation); - // FIXME:(arihant2math) Schedule transactions properly - // while db.transactions.iter().any(|s| s.1.mode == IndexedDBTxnMode::Readwrite) { - // std::hint::spin_loop(); - // } - db.start_transaction(txn, None); - } - }, - } - } - } - - fn get_database( - &self, - origin: ImmutableOrigin, - db_name: String, - ) -> Option<&IndexedDBEnvironment> { - let idb_description = IndexedDBDescription { - origin, - name: db_name, - }; - - self.databases.get(&idb_description) - } - - fn get_database_mut( - &mut self, - origin: ImmutableOrigin, - db_name: String, - ) -> Option<&mut IndexedDBEnvironment> { - let idb_description = IndexedDBDescription { - origin, - name: db_name, - }; - - self.databases.get_mut(&idb_description) - } - - fn handle_sync_operation(&mut self, operation: SyncOperation) { - match operation { - SyncOperation::CloseDatabase(sender, origin, db_name) => { - let idb_description = IndexedDBDescription { - origin, - name: db_name, - }; - if let Some(_db) = self.databases.remove(&idb_description) { - // TODO: maybe a close database function should be added to the trait and called here? - } - let _ = sender.send(Ok(())); - }, - SyncOperation::OpenDatabase(sender, origin, db_name, version) => { - let idb_description = IndexedDBDescription { - origin, - name: db_name, - }; - - let idb_base_dir = self.idb_base_dir.as_path(); - - let version = version.unwrap_or(0); - - match self.databases.entry(idb_description.clone()) { - Entry::Vacant(e) => { - let db = IndexedDBEnvironment::new( - SqliteEngine::new( - idb_base_dir, - &idb_description, - self.thread_pool.clone(), - ) - .expect("Failed to create sqlite engine"), - ); - let _ = sender.send(db.version().unwrap_or(version)); - e.insert(db); - }, - Entry::Occupied(db) => { - let _ = sender.send(db.get().version().unwrap_or(version)); - }, - } - }, - SyncOperation::DeleteDatabase(sender, origin, db_name) => { - // https://w3c.github.io/IndexedDB/#delete-a-database - // Step 4. Let db be the database named name in storageKey, - // if one exists. Otherwise, return 0 (zero). 
- let idb_description = IndexedDBDescription { - origin, - name: db_name, - }; - if let Some(db) = self.databases.remove(&idb_description) { - db.delete_database(sender); - } else { - let _ = sender.send(Ok(())); - } - }, - SyncOperation::HasKeyGenerator(sender, origin, db_name, store_name) => { - let result = self - .get_database(origin, db_name) - .map(|db| db.has_key_generator(&store_name)); - let _ = sender.send(result.ok_or(BackendError::DbNotFound)); - }, - SyncOperation::KeyPath(sender, origin, db_name, store_name) => { - let result = self - .get_database(origin, db_name) - .map(|db| db.key_path(&store_name)); - let _ = sender.send(result.ok_or(BackendError::DbNotFound)); - }, - SyncOperation::CreateIndex( - sender, - origin, - db_name, - store_name, - index_name, - key_path, - unique, - multi_entry, - ) => { - if let Some(db) = self.get_database(origin, db_name) { - let result = - db.create_index(&store_name, index_name, key_path, unique, multi_entry); - let _ = sender.send(result.map_err(BackendError::from)); - } else { - let _ = sender.send(Err(BackendError::DbNotFound)); - } - }, - SyncOperation::DeleteIndex(sender, origin, db_name, store_name, index_name) => { - if let Some(db) = self.get_database(origin, db_name) { - let result = db.delete_index(&store_name, index_name); - let _ = sender.send(result.map_err(BackendError::from)); - } else { - let _ = sender.send(Err(BackendError::DbNotFound)); - } - }, - SyncOperation::Commit(sender, _origin, _db_name, _txn) => { - // FIXME:(arihant2math) This does nothing at the moment - let _ = sender.send(Ok(())); - }, - SyncOperation::UpgradeVersion(sender, origin, db_name, _txn, version) => { - if let Some(db) = self.get_database_mut(origin, db_name) { - if version > db.version().unwrap_or(0) { - let _ = db.set_version(version); - } - // erroring out if the version is not upgraded can be and non-replicable - let _ = sender.send(db.version().map_err(BackendError::from)); - } else { - let _ = sender.send(Err(BackendError::DbNotFound)); - } - }, - SyncOperation::CreateObjectStore( - sender, - origin, - db_name, - store_name, - key_paths, - auto_increment, - ) => { - if let Some(db) = self.get_database_mut(origin, db_name) { - let result = db.create_object_store(&store_name, key_paths, auto_increment); - let _ = sender.send(result.map_err(BackendError::from)); - } else { - let _ = sender.send(Err(BackendError::DbNotFound)); - } - }, - SyncOperation::DeleteObjectStore(sender, origin, db_name, store_name) => { - if let Some(db) = self.get_database_mut(origin, db_name) { - let result = db.delete_object_store(&store_name); - let _ = sender.send(result.map_err(BackendError::from)); - } else { - let _ = sender.send(Err(BackendError::DbNotFound)); - } - }, - SyncOperation::StartTransaction(sender, origin, db_name, txn) => { - if let Some(db) = self.get_database_mut(origin, db_name) { - db.start_transaction(txn, Some(sender)); - } else { - let _ = sender.send(Err(BackendError::DbNotFound)); - } - }, - SyncOperation::Version(sender, origin, db_name) => { - if let Some(db) = self.get_database(origin, db_name) { - let _ = sender.send(db.version().map_err(BackendError::from)); - } else { - let _ = sender.send(Err(BackendError::DbNotFound)); - } - }, - SyncOperation::RegisterNewTxn(sender, origin, db_name) => { - if let Some(db) = self.get_database_mut(origin, db_name) { - db.serial_number_counter += 1; - let _ = sender.send(db.serial_number_counter); - } - }, - SyncOperation::Exit(sender) => { - // FIXME:(rasviitanen) Nothing to do? 
- let _ = sender.send(()); - }, - } - } -} diff --git a/components/storage/indexeddb/mod.rs b/components/storage/indexeddb/mod.rs index 7416bf83b4b..c5f7610af98 100644 --- a/components/storage/indexeddb/mod.rs +++ b/components/storage/indexeddb/mod.rs @@ -2,8 +2,443 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ -pub use self::idb_thread::IndexedDBThreadFactory; +mod engines; -pub(super) mod engines; +use std::borrow::ToOwned; +use std::collections::hash_map::Entry; +use std::collections::{HashMap, VecDeque}; +use std::path::PathBuf; +use std::sync::Arc; +use std::thread; -pub mod idb_thread; +use base::threadpool::ThreadPool; +use ipc_channel::ipc::{self, IpcError, IpcReceiver, IpcSender}; +use log::{debug, warn}; +use rustc_hash::FxHashMap; +use servo_config::pref; +use servo_url::origin::ImmutableOrigin; +use storage_traits::indexeddb_thread::{ + AsyncOperation, BackendError, BackendResult, CreateObjectResult, DbResult, IndexedDBThreadMsg, + IndexedDBTxnMode, KeyPath, SyncOperation, +}; +use uuid::Uuid; + +use crate::indexeddb::engines::{KvsEngine, KvsOperation, KvsTransaction, SqliteEngine}; + +pub trait IndexedDBThreadFactory { + fn new(config_dir: Option) -> Self; +} + +impl IndexedDBThreadFactory for IpcSender { + fn new(config_dir: Option) -> IpcSender { + let (chan, port) = ipc::channel().unwrap(); + + let mut idb_base_dir = PathBuf::new(); + if let Some(p) = config_dir { + idb_base_dir.push(p); + } + idb_base_dir.push("IndexedDB"); + + thread::Builder::new() + .name("IndexedDBManager".to_owned()) + .spawn(move || { + IndexedDBManager::new(port, idb_base_dir).start(); + }) + .expect("Thread spawning failed"); + + chan + } +} + +#[derive(Clone, Eq, Hash, PartialEq)] +pub struct IndexedDBDescription { + pub origin: ImmutableOrigin, + pub name: String, +} + +impl IndexedDBDescription { + // randomly generated namespace for our purposes + const NAMESPACE_SERVO_IDB: &uuid::Uuid = &Uuid::from_bytes([ + 0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc2, 0xa0, 0xdb, 0xe2, 0x18, 0xc5, 0xc8, 0xa3, + 0x5d, + ]); + // Converts the database description to a folder name where all + // data for this database is stored + pub(super) fn as_path(&self) -> PathBuf { + let mut path = PathBuf::new(); + + // uuid v5 is deterministic + let origin_uuid = Uuid::new_v5( + Self::NAMESPACE_SERVO_IDB, + self.origin.ascii_serialization().as_bytes(), + ); + let db_name_uuid = Uuid::new_v5(Self::NAMESPACE_SERVO_IDB, self.name.as_bytes()); + path.push(origin_uuid.to_string()); + path.push(db_name_uuid.to_string()); + + path + } +} + +struct IndexedDBEnvironment { + engine: E, + transactions: FxHashMap, + serial_number_counter: u64, +} + +impl IndexedDBEnvironment { + fn new(engine: E) -> IndexedDBEnvironment { + IndexedDBEnvironment { + engine, + transactions: FxHashMap::default(), + serial_number_counter: 0, + } + } + + fn queue_operation( + &mut self, + store_name: &str, + serial_number: u64, + mode: IndexedDBTxnMode, + operation: AsyncOperation, + ) { + self.transactions + .entry(serial_number) + .or_insert_with(|| KvsTransaction { + requests: VecDeque::new(), + mode, + }) + .requests + .push_back(KvsOperation { + operation, + store_name: String::from(store_name), + }); + } + + // Executes all requests for a transaction (without committing) + fn start_transaction(&mut self, txn: u64, sender: Option>>) { + // FIXME:(arihant2math) find optimizations in this function + // rather than on the engine level code (less repetition) + 
if let Some(txn) = self.transactions.remove(&txn) { + let _ = self.engine.process_transaction(txn).blocking_recv(); + } + + // We have a sender if the transaction is started manually, and they + // probably want to know when it is finished + if let Some(sender) = sender { + if sender.send(Ok(())).is_err() { + warn!("IDBTransaction starter dropped its channel"); + } + } + } + + fn has_key_generator(&self, store_name: &str) -> bool { + self.engine.has_key_generator(store_name) + } + + fn key_path(&self, store_name: &str) -> Option { + self.engine.key_path(store_name) + } + + fn create_index( + &self, + store_name: &str, + index_name: String, + key_path: KeyPath, + unique: bool, + multi_entry: bool, + ) -> DbResult { + self.engine + .create_index(store_name, index_name, key_path, unique, multi_entry) + .map_err(|err| format!("{err:?}")) + } + + fn delete_index(&self, store_name: &str, index_name: String) -> DbResult<()> { + self.engine + .delete_index(store_name, index_name) + .map_err(|err| format!("{err:?}")) + } + + fn create_object_store( + &mut self, + store_name: &str, + key_path: Option, + auto_increment: bool, + ) -> DbResult { + self.engine + .create_store(store_name, key_path, auto_increment) + .map_err(|err| format!("{err:?}")) + } + + fn delete_object_store(&mut self, store_name: &str) -> DbResult<()> { + let result = self.engine.delete_store(store_name); + result.map_err(|err| format!("{err:?}")) + } + + fn delete_database(self, sender: IpcSender>) { + let result = self.engine.delete_database(); + let _ = sender.send( + result + .map_err(|err| format!("{err:?}")) + .map_err(BackendError::from), + ); + } + + fn version(&self) -> DbResult { + self.engine.version().map_err(|err| format!("{err:?}")) + } + + fn set_version(&mut self, version: u64) -> DbResult<()> { + self.engine + .set_version(version) + .map_err(|err| format!("{err:?}")) + } +} + +struct IndexedDBManager { + port: IpcReceiver, + idb_base_dir: PathBuf, + databases: HashMap>, + thread_pool: Arc, +} + +impl IndexedDBManager { + fn new(port: IpcReceiver, idb_base_dir: PathBuf) -> IndexedDBManager { + debug!("New indexedDBManager"); + + // Uses an estimate of the system cpus to process IndexedDB transactions + // See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html + // If no information can be obtained about the system, uses 4 threads as a default + let thread_count = thread::available_parallelism() + .map(|i| i.get()) + .unwrap_or(pref!(threadpools_fallback_worker_num) as usize) + .min(pref!(threadpools_indexeddb_workers_max).max(1) as usize); + + IndexedDBManager { + port, + idb_base_dir, + databases: HashMap::new(), + thread_pool: Arc::new(ThreadPool::new(thread_count, "IndexedDB".to_string())), + } + } +} + +impl IndexedDBManager { + fn start(&mut self) { + loop { + // FIXME:(arihant2math) No message *most likely* means that + // the ipc sender has been dropped, so we break the look + let message = match self.port.recv() { + Ok(msg) => msg, + Err(e) => match e { + IpcError::Disconnected => { + break; + }, + other => { + warn!("Error in IndexedDB thread: {:?}", other); + continue; + }, + }, + }; + match message { + IndexedDBThreadMsg::Sync(operation) => { + self.handle_sync_operation(operation); + }, + IndexedDBThreadMsg::Async(origin, db_name, store_name, txn, mode, operation) => { + if let Some(db) = self.get_database_mut(origin, db_name) { + // Queues an operation for a transaction without starting it + db.queue_operation(&store_name, txn, mode, operation); + // FIXME:(arihant2math) 
Schedule transactions properly + // while db.transactions.iter().any(|s| s.1.mode == IndexedDBTxnMode::Readwrite) { + // std::hint::spin_loop(); + // } + db.start_transaction(txn, None); + } + }, + } + } + } + + fn get_database( + &self, + origin: ImmutableOrigin, + db_name: String, + ) -> Option<&IndexedDBEnvironment> { + let idb_description = IndexedDBDescription { + origin, + name: db_name, + }; + + self.databases.get(&idb_description) + } + + fn get_database_mut( + &mut self, + origin: ImmutableOrigin, + db_name: String, + ) -> Option<&mut IndexedDBEnvironment> { + let idb_description = IndexedDBDescription { + origin, + name: db_name, + }; + + self.databases.get_mut(&idb_description) + } + + fn handle_sync_operation(&mut self, operation: SyncOperation) { + match operation { + SyncOperation::CloseDatabase(sender, origin, db_name) => { + let idb_description = IndexedDBDescription { + origin, + name: db_name, + }; + if let Some(_db) = self.databases.remove(&idb_description) { + // TODO: maybe a close database function should be added to the trait and called here? + } + let _ = sender.send(Ok(())); + }, + SyncOperation::OpenDatabase(sender, origin, db_name, version) => { + let idb_description = IndexedDBDescription { + origin, + name: db_name, + }; + + let idb_base_dir = self.idb_base_dir.as_path(); + + let version = version.unwrap_or(0); + + match self.databases.entry(idb_description.clone()) { + Entry::Vacant(e) => { + let db = IndexedDBEnvironment::new( + SqliteEngine::new( + idb_base_dir, + &idb_description, + self.thread_pool.clone(), + ) + .expect("Failed to create sqlite engine"), + ); + let _ = sender.send(db.version().unwrap_or(version)); + e.insert(db); + }, + Entry::Occupied(db) => { + let _ = sender.send(db.get().version().unwrap_or(version)); + }, + } + }, + SyncOperation::DeleteDatabase(sender, origin, db_name) => { + // https://w3c.github.io/IndexedDB/#delete-a-database + // Step 4. Let db be the database named name in storageKey, + // if one exists. Otherwise, return 0 (zero). 
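+                // If no environment is open for this description, the else branch below
+                // reports success immediately; otherwise the environment is removed from
+                // the map and asked to delete its on-disk data, replying through `sender`.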
+ let idb_description = IndexedDBDescription { + origin, + name: db_name, + }; + if let Some(db) = self.databases.remove(&idb_description) { + db.delete_database(sender); + } else { + let _ = sender.send(Ok(())); + } + }, + SyncOperation::HasKeyGenerator(sender, origin, db_name, store_name) => { + let result = self + .get_database(origin, db_name) + .map(|db| db.has_key_generator(&store_name)); + let _ = sender.send(result.ok_or(BackendError::DbNotFound)); + }, + SyncOperation::KeyPath(sender, origin, db_name, store_name) => { + let result = self + .get_database(origin, db_name) + .map(|db| db.key_path(&store_name)); + let _ = sender.send(result.ok_or(BackendError::DbNotFound)); + }, + SyncOperation::CreateIndex( + sender, + origin, + db_name, + store_name, + index_name, + key_path, + unique, + multi_entry, + ) => { + if let Some(db) = self.get_database(origin, db_name) { + let result = + db.create_index(&store_name, index_name, key_path, unique, multi_entry); + let _ = sender.send(result.map_err(BackendError::from)); + } else { + let _ = sender.send(Err(BackendError::DbNotFound)); + } + }, + SyncOperation::DeleteIndex(sender, origin, db_name, store_name, index_name) => { + if let Some(db) = self.get_database(origin, db_name) { + let result = db.delete_index(&store_name, index_name); + let _ = sender.send(result.map_err(BackendError::from)); + } else { + let _ = sender.send(Err(BackendError::DbNotFound)); + } + }, + SyncOperation::Commit(sender, _origin, _db_name, _txn) => { + // FIXME:(arihant2math) This does nothing at the moment + let _ = sender.send(Ok(())); + }, + SyncOperation::UpgradeVersion(sender, origin, db_name, _txn, version) => { + if let Some(db) = self.get_database_mut(origin, db_name) { + if version > db.version().unwrap_or(0) { + let _ = db.set_version(version); + } + // erroring out if the version is not upgraded can be and non-replicable + let _ = sender.send(db.version().map_err(BackendError::from)); + } else { + let _ = sender.send(Err(BackendError::DbNotFound)); + } + }, + SyncOperation::CreateObjectStore( + sender, + origin, + db_name, + store_name, + key_paths, + auto_increment, + ) => { + if let Some(db) = self.get_database_mut(origin, db_name) { + let result = db.create_object_store(&store_name, key_paths, auto_increment); + let _ = sender.send(result.map_err(BackendError::from)); + } else { + let _ = sender.send(Err(BackendError::DbNotFound)); + } + }, + SyncOperation::DeleteObjectStore(sender, origin, db_name, store_name) => { + if let Some(db) = self.get_database_mut(origin, db_name) { + let result = db.delete_object_store(&store_name); + let _ = sender.send(result.map_err(BackendError::from)); + } else { + let _ = sender.send(Err(BackendError::DbNotFound)); + } + }, + SyncOperation::StartTransaction(sender, origin, db_name, txn) => { + if let Some(db) = self.get_database_mut(origin, db_name) { + db.start_transaction(txn, Some(sender)); + } else { + let _ = sender.send(Err(BackendError::DbNotFound)); + } + }, + SyncOperation::Version(sender, origin, db_name) => { + if let Some(db) = self.get_database(origin, db_name) { + let _ = sender.send(db.version().map_err(BackendError::from)); + } else { + let _ = sender.send(Err(BackendError::DbNotFound)); + } + }, + SyncOperation::RegisterNewTxn(sender, origin, db_name) => { + if let Some(db) = self.get_database_mut(origin, db_name) { + db.serial_number_counter += 1; + let _ = sender.send(db.serial_number_counter); + } + }, + SyncOperation::Exit(sender) => { + // FIXME:(rasviitanen) Nothing to do? 
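+                // Acknowledge the exit request so the caller is not left blocking on the reply.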
+ let _ = sender.send(()); + }, + } + } +} diff --git a/components/storage/webstorage/engines/mod.rs b/components/storage/webstorage/engines/mod.rs index 1d7b275ea95..1d2023f8c05 100644 --- a/components/storage/webstorage/engines/mod.rs +++ b/components/storage/webstorage/engines/mod.rs @@ -2,7 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ -use crate::webstorage::webstorage_thread::OriginEntry; +use crate::webstorage::OriginEntry; pub mod sqlite; diff --git a/components/storage/webstorage/engines/sqlite.rs b/components/storage/webstorage/engines/sqlite.rs index a099f36d25a..8484a54c2da 100644 --- a/components/storage/webstorage/engines/sqlite.rs +++ b/components/storage/webstorage/engines/sqlite.rs @@ -10,8 +10,8 @@ use log::error; use rusqlite::Connection; use crate::shared::{DB_INIT_PRAGMAS, DB_PRAGMAS}; +use crate::webstorage::OriginEntry; use crate::webstorage::engines::WebStorageEngine; -use crate::webstorage::webstorage_thread::OriginEntry; pub struct SqliteEngine { connection: Connection, diff --git a/components/storage/webstorage/mod.rs b/components/storage/webstorage/mod.rs index 142b576ba1a..121318595b7 100644 --- a/components/storage/webstorage/mod.rs +++ b/components/storage/webstorage/mod.rs @@ -3,6 +3,488 @@ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ mod engines; -pub mod webstorage_thread; -pub use webstorage_thread::WebStorageThreadFactory; +use std::borrow::ToOwned; +use std::collections::BTreeMap; +use std::path::PathBuf; +use std::sync::Arc; +use std::thread; + +use base::generic_channel::{self, GenericReceiver, GenericSender}; +use base::id::WebViewId; +use base::threadpool::ThreadPool; +use malloc_size_of::MallocSizeOf; +use malloc_size_of_derive::MallocSizeOf; +use profile_traits::mem::{ + ProcessReports, ProfilerChan as MemProfilerChan, Report, ReportKind, perform_memory_report, +}; +use profile_traits::path; +use rustc_hash::FxHashMap; +use servo_config::pref; +use servo_url::{ImmutableOrigin, ServoUrl}; +use storage_traits::webstorage_thread::{StorageType, WebStorageThreadMsg}; +use uuid::Uuid; + +use crate::webstorage::engines::WebStorageEngine; +use crate::webstorage::engines::sqlite::SqliteEngine; + +const QUOTA_SIZE_LIMIT: usize = 5 * 1024 * 1024; + +pub trait WebStorageThreadFactory { + fn new(config_dir: Option, mem_profiler_chan: MemProfilerChan) -> Self; +} + +impl WebStorageThreadFactory for GenericSender { + /// Create a storage thread + fn new( + config_dir: Option, + mem_profiler_chan: MemProfilerChan, + ) -> GenericSender { + let (chan, port) = generic_channel::channel().unwrap(); + let chan2 = chan.clone(); + thread::Builder::new() + .name("WebStorageManager".to_owned()) + .spawn(move || { + mem_profiler_chan.run_with_memory_reporting( + || WebStorageManager::new(port, config_dir).start(), + String::from("storage-reporter"), + chan2, + WebStorageThreadMsg::CollectMemoryReport, + ); + }) + .expect("Thread spawning failed"); + chan + } +} + +#[derive(Clone, Default, MallocSizeOf)] +pub struct OriginEntry { + tree: BTreeMap, + size: usize, +} + +impl OriginEntry { + pub fn inner(&self) -> &BTreeMap { + &self.tree + } + + pub fn insert(&mut self, key: String, value: String) -> Option { + let old_value = self.tree.insert(key.clone(), value.clone()); + let size_change = match &old_value { + Some(old) => value.len() as isize - old.len() as isize, + None => (key.len() + value.len()) as isize, + }; + self.size = (self.size as isize + size_change) 
as usize; + old_value + } + + pub fn remove(&mut self, key: &str) -> Option { + let old_value = self.tree.remove(key); + if let Some(old) = &old_value { + self.size -= key.len() + old.len(); + } + old_value + } + + pub fn clear(&mut self) { + self.tree.clear(); + self.size = 0; + } + + pub fn size(&self) -> usize { + self.size + } +} + +struct WebStorageEnvironment { + engine: E, + data: OriginEntry, +} + +impl MallocSizeOf for WebStorageEnvironment { + fn size_of(&self, ops: &mut malloc_size_of::MallocSizeOfOps) -> usize { + self.data.size_of(ops) + } +} + +impl WebStorageEnvironment { + fn new(engine: E) -> Self { + WebStorageEnvironment { + data: engine.load().unwrap_or_default(), + engine, + } + } + + fn clear(&mut self) { + self.data.clear(); + let _ = self.engine.clear(); + } + + fn delete(&mut self, key: &str) { + let _ = self.engine.delete(key); + } + + fn set(&mut self, key: &str, value: &str) { + let _ = self.engine.set(key, value); + } +} + +impl Drop for WebStorageEnvironment { + fn drop(&mut self) { + self.engine.save(&self.data); + } +} + +struct WebStorageManager { + port: GenericReceiver, + session_data: FxHashMap>, + config_dir: Option, + thread_pool: Arc, + environments: FxHashMap>, +} + +impl WebStorageManager { + fn new( + port: GenericReceiver, + config_dir: Option, + ) -> WebStorageManager { + // Uses an estimate of the system cpus to process Webstorage transactions + // See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html + // If no information can be obtained about the system, uses 4 threads as a default + let thread_count = thread::available_parallelism() + .map(|i| i.get()) + .unwrap_or(pref!(threadpools_fallback_worker_num) as usize) + .min(pref!(threadpools_webstorage_workers_max).max(1) as usize); + WebStorageManager { + port, + session_data: FxHashMap::default(), + config_dir, + thread_pool: Arc::new(ThreadPool::new(thread_count, "WebStorage".to_string())), + environments: FxHashMap::default(), + } + } +} + +impl WebStorageManager { + fn start(&mut self) { + loop { + match self.port.recv().unwrap() { + WebStorageThreadMsg::Length(sender, storage_type, webview_id, url) => { + self.length(sender, storage_type, webview_id, url) + }, + WebStorageThreadMsg::Key(sender, storage_type, webview_id, url, index) => { + self.key(sender, storage_type, webview_id, url, index) + }, + WebStorageThreadMsg::Keys(sender, storage_type, webview_id, url) => { + self.keys(sender, storage_type, webview_id, url) + }, + WebStorageThreadMsg::SetItem( + sender, + storage_type, + webview_id, + url, + name, + value, + ) => { + self.set_item(sender, storage_type, webview_id, url, name, value); + }, + WebStorageThreadMsg::GetItem(sender, storage_type, webview_id, url, name) => { + self.request_item(sender, storage_type, webview_id, url, name) + }, + WebStorageThreadMsg::RemoveItem(sender, storage_type, webview_id, url, name) => { + self.remove_item(sender, storage_type, webview_id, url, name); + }, + WebStorageThreadMsg::Clear(sender, storage_type, webview_id, url) => { + self.clear(sender, storage_type, webview_id, url); + }, + WebStorageThreadMsg::Clone { + sender, + src: src_webview_id, + dest: dest_webview_id, + } => { + self.clone(src_webview_id, dest_webview_id); + let _ = sender.send(()); + }, + WebStorageThreadMsg::CollectMemoryReport(sender) => { + let reports = self.collect_memory_reports(); + sender.send(ProcessReports::new(reports)); + }, + WebStorageThreadMsg::Exit(sender) => { + // Nothing to do since we save localstorage set eagerly. 
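+                    // Acknowledge the exit request, then break out of the receive loop.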
+ let _ = sender.send(()); + break; + }, + } + } + } + + fn collect_memory_reports(&self) -> Vec { + let mut reports = vec![]; + perform_memory_report(|ops| { + reports.push(Report { + path: path!["storage", "local"], + kind: ReportKind::ExplicitJemallocHeapSize, + size: self.environments.size_of(ops), + }); + + reports.push(Report { + path: path!["storage", "session"], + kind: ReportKind::ExplicitJemallocHeapSize, + size: self.session_data.size_of(ops), + }); + }); + reports + } + + fn get_origin_location(&self, origin: &ImmutableOrigin) -> Option { + match &self.config_dir { + Some(config_dir) => { + const NAMESPACE_SERVO_WEBSTORAGE: &uuid::Uuid = &Uuid::from_bytes([ + 0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc5, 0xa4, 0xdb, 0xe2, 0x18, 0xc5, + 0xc8, 0xa3, 0x5d, + ]); + let origin_uuid = Uuid::new_v5( + NAMESPACE_SERVO_WEBSTORAGE, + origin.ascii_serialization().as_bytes(), + ); + Some(config_dir.join("webstorage").join(origin_uuid.to_string())) + }, + None => None, + } + } + + fn get_environment( + &mut self, + origin: &ImmutableOrigin, + ) -> &WebStorageEnvironment { + if self.environments.contains_key(origin) { + return self.environments.get(origin).unwrap(); + } + + let origin_location = self.get_origin_location(origin); + + let engine = SqliteEngine::new(&origin_location, self.thread_pool.clone()).unwrap(); + let environment = WebStorageEnvironment::new(engine); + self.environments.insert(origin.clone(), environment); + self.environments.get(origin).unwrap() + } + + fn get_environment_mut( + &mut self, + origin: &ImmutableOrigin, + ) -> &mut WebStorageEnvironment { + if self.environments.contains_key(origin) { + return self.environments.get_mut(origin).unwrap(); + } + + let origin_location = self.get_origin_location(origin); + + let engine = SqliteEngine::new(&origin_location, self.thread_pool.clone()).unwrap(); + let environment = WebStorageEnvironment::new(engine); + self.environments.insert(origin.clone(), environment); + self.environments.get_mut(origin).unwrap() + } + + fn select_data( + &mut self, + storage_type: StorageType, + webview_id: WebViewId, + origin: ImmutableOrigin, + ) -> Option<&OriginEntry> { + match storage_type { + StorageType::Session => self + .session_data + .get(&webview_id) + .and_then(|origin_map| origin_map.get(&origin)), + StorageType::Local => Some(&self.get_environment(&origin).data), + } + } + + fn select_data_mut( + &mut self, + storage_type: StorageType, + webview_id: WebViewId, + origin: ImmutableOrigin, + ) -> Option<&mut OriginEntry> { + match storage_type { + StorageType::Session => self + .session_data + .get_mut(&webview_id) + .and_then(|origin_map| origin_map.get_mut(&origin)), + StorageType::Local => Some(&mut self.get_environment_mut(&origin).data), + } + } + + fn ensure_data_mut( + &mut self, + storage_type: StorageType, + webview_id: WebViewId, + origin: ImmutableOrigin, + ) -> &mut OriginEntry { + match storage_type { + StorageType::Session => self + .session_data + .entry(webview_id) + .or_default() + .entry(origin) + .or_default(), + StorageType::Local => &mut self.get_environment_mut(&origin).data, + } + } + + fn length( + &mut self, + sender: GenericSender, + storage_type: StorageType, + webview_id: WebViewId, + url: ServoUrl, + ) { + let data = self.select_data(storage_type, webview_id, url.origin()); + sender + .send(data.map_or(0, |entry| entry.inner().len())) + .unwrap(); + } + + fn key( + &mut self, + sender: GenericSender>, + storage_type: StorageType, + webview_id: WebViewId, + url: ServoUrl, + index: u32, + ) { + let data 
= self.select_data(storage_type, webview_id, url.origin()); + let key = data + .and_then(|entry| entry.inner().keys().nth(index as usize)) + .cloned(); + sender.send(key).unwrap(); + } + + fn keys( + &mut self, + sender: GenericSender>, + storage_type: StorageType, + webview_id: WebViewId, + url: ServoUrl, + ) { + let data = self.select_data(storage_type, webview_id, url.origin()); + let keys = data.map_or(vec![], |entry| entry.inner().keys().cloned().collect()); + + sender.send(keys).unwrap(); + } + + /// Sends Ok(changed, Some(old_value)) in case there was a previous + /// value with the same key name but with different value name + /// otherwise sends Err(()) to indicate that the operation would result in + /// exceeding the quota limit + fn set_item( + &mut self, + sender: GenericSender), ()>>, + storage_type: StorageType, + webview_id: WebViewId, + url: ServoUrl, + name: String, + value: String, + ) { + let (this_storage_size, other_storage_size) = { + let local_data = self.select_data(StorageType::Local, webview_id, url.origin()); + let local_data_size = local_data.map_or(0, OriginEntry::size); + let session_data = self.select_data(StorageType::Session, webview_id, url.origin()); + let session_data_size = session_data.map_or(0, OriginEntry::size); + match storage_type { + StorageType::Local => (local_data_size, session_data_size), + StorageType::Session => (session_data_size, local_data_size), + } + }; + + let entry = self.ensure_data_mut(storage_type, webview_id, url.origin()); + + let mut new_total_size = this_storage_size + value.len(); + if let Some(old_value) = entry.inner().get(&name) { + new_total_size -= old_value.len(); + } else { + new_total_size += name.len(); + } + + let message = if (new_total_size + other_storage_size) > QUOTA_SIZE_LIMIT { + Err(()) + } else { + let result = + entry + .insert(name.clone(), value.clone()) + .map_or(Ok((true, None)), |old| { + if old == value { + Ok((false, None)) + } else { + Ok((true, Some(old))) + } + }); + let env = self.get_environment_mut(&url.origin()); + env.set(&name, &value); + result + }; + sender.send(message).unwrap(); + } + + fn request_item( + &mut self, + sender: GenericSender>, + storage_type: StorageType, + webview_id: WebViewId, + url: ServoUrl, + name: String, + ) { + let data = self.select_data(storage_type, webview_id, url.origin()); + sender + .send(data.and_then(|entry| entry.inner().get(&name)).cloned()) + .unwrap(); + } + + /// Sends Some(old_value) in case there was a previous value with the key name, otherwise sends None + fn remove_item( + &mut self, + sender: GenericSender>, + storage_type: StorageType, + webview_id: WebViewId, + url: ServoUrl, + name: String, + ) { + let data = self.select_data_mut(storage_type, webview_id, url.origin()); + let old_value = data.and_then(|entry| entry.remove(&name)); + sender.send(old_value).unwrap(); + let env = self.get_environment_mut(&url.origin()); + env.delete(&name); + } + + fn clear( + &mut self, + sender: GenericSender, + storage_type: StorageType, + webview_id: WebViewId, + url: ServoUrl, + ) { + let data = self.select_data_mut(storage_type, webview_id, url.origin()); + sender + .send(data.is_some_and(|entry| { + if !entry.inner().is_empty() { + entry.clear(); + true + } else { + false + } + })) + .unwrap(); + let env = self.get_environment_mut(&url.origin()); + env.clear(); + } + + fn clone(&mut self, src_webview_id: WebViewId, dest_webview_id: WebViewId) { + let Some(src_origin_entries) = self.session_data.get(&src_webview_id) else { + return; + }; + + let 
dest_origin_entries = src_origin_entries.clone(); + self.session_data + .insert(dest_webview_id, dest_origin_entries); + } +} diff --git a/components/storage/webstorage/webstorage_thread.rs b/components/storage/webstorage/webstorage_thread.rs deleted file mode 100644 index 02c13903047..00000000000 --- a/components/storage/webstorage/webstorage_thread.rs +++ /dev/null @@ -1,488 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ - -use std::borrow::ToOwned; -use std::collections::BTreeMap; -use std::path::PathBuf; -use std::sync::Arc; -use std::thread; - -use base::generic_channel::{self, GenericReceiver, GenericSender}; -use base::id::WebViewId; -use base::threadpool::ThreadPool; -use malloc_size_of::MallocSizeOf; -use malloc_size_of_derive::MallocSizeOf; -use profile_traits::mem::{ - ProcessReports, ProfilerChan as MemProfilerChan, Report, ReportKind, perform_memory_report, -}; -use profile_traits::path; -use rustc_hash::FxHashMap; -use servo_config::pref; -use servo_url::{ImmutableOrigin, ServoUrl}; -use storage_traits::webstorage_thread::{StorageType, WebStorageThreadMsg}; -use uuid::Uuid; - -use crate::webstorage::engines::WebStorageEngine; -use crate::webstorage::engines::sqlite::SqliteEngine; - -const QUOTA_SIZE_LIMIT: usize = 5 * 1024 * 1024; - -pub trait WebStorageThreadFactory { - fn new(config_dir: Option, mem_profiler_chan: MemProfilerChan) -> Self; -} - -impl WebStorageThreadFactory for GenericSender { - /// Create a storage thread - fn new( - config_dir: Option, - mem_profiler_chan: MemProfilerChan, - ) -> GenericSender { - let (chan, port) = generic_channel::channel().unwrap(); - let chan2 = chan.clone(); - thread::Builder::new() - .name("WebStorageManager".to_owned()) - .spawn(move || { - mem_profiler_chan.run_with_memory_reporting( - || WebStorageManager::new(port, config_dir).start(), - String::from("storage-reporter"), - chan2, - WebStorageThreadMsg::CollectMemoryReport, - ); - }) - .expect("Thread spawning failed"); - chan - } -} - -#[derive(Clone, Default, MallocSizeOf)] -pub struct OriginEntry { - tree: BTreeMap, - size: usize, -} - -impl OriginEntry { - pub fn inner(&self) -> &BTreeMap { - &self.tree - } - - pub fn insert(&mut self, key: String, value: String) -> Option { - let old_value = self.tree.insert(key.clone(), value.clone()); - let size_change = match &old_value { - Some(old) => value.len() as isize - old.len() as isize, - None => (key.len() + value.len()) as isize, - }; - self.size = (self.size as isize + size_change) as usize; - old_value - } - - pub fn remove(&mut self, key: &str) -> Option { - let old_value = self.tree.remove(key); - if let Some(old) = &old_value { - self.size -= key.len() + old.len(); - } - old_value - } - - pub fn clear(&mut self) { - self.tree.clear(); - self.size = 0; - } - - pub fn size(&self) -> usize { - self.size - } -} - -struct WebStorageEnvironment { - engine: E, - data: OriginEntry, -} - -impl MallocSizeOf for WebStorageEnvironment { - fn size_of(&self, ops: &mut malloc_size_of::MallocSizeOfOps) -> usize { - self.data.size_of(ops) - } -} - -impl WebStorageEnvironment { - fn new(engine: E) -> Self { - WebStorageEnvironment { - data: engine.load().unwrap_or_default(), - engine, - } - } - - fn clear(&mut self) { - self.data.clear(); - let _ = self.engine.clear(); - } - - fn delete(&mut self, key: &str) { - let _ = self.engine.delete(key); - } - - fn set(&mut self, 
key: &str, value: &str) { - let _ = self.engine.set(key, value); - } -} - -impl Drop for WebStorageEnvironment { - fn drop(&mut self) { - self.engine.save(&self.data); - } -} - -struct WebStorageManager { - port: GenericReceiver, - session_data: FxHashMap>, - config_dir: Option, - thread_pool: Arc, - environments: FxHashMap>, -} - -impl WebStorageManager { - fn new( - port: GenericReceiver, - config_dir: Option, - ) -> WebStorageManager { - // Uses an estimate of the system cpus to process Webstorage transactions - // See https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html - // If no information can be obtained about the system, uses 4 threads as a default - let thread_count = thread::available_parallelism() - .map(|i| i.get()) - .unwrap_or(pref!(threadpools_fallback_worker_num) as usize) - .min(pref!(threadpools_webstorage_workers_max).max(1) as usize); - WebStorageManager { - port, - session_data: FxHashMap::default(), - config_dir, - thread_pool: Arc::new(ThreadPool::new(thread_count, "WebStorage".to_string())), - environments: FxHashMap::default(), - } - } -} - -impl WebStorageManager { - fn start(&mut self) { - loop { - match self.port.recv().unwrap() { - WebStorageThreadMsg::Length(sender, storage_type, webview_id, url) => { - self.length(sender, storage_type, webview_id, url) - }, - WebStorageThreadMsg::Key(sender, storage_type, webview_id, url, index) => { - self.key(sender, storage_type, webview_id, url, index) - }, - WebStorageThreadMsg::Keys(sender, storage_type, webview_id, url) => { - self.keys(sender, storage_type, webview_id, url) - }, - WebStorageThreadMsg::SetItem( - sender, - storage_type, - webview_id, - url, - name, - value, - ) => { - self.set_item(sender, storage_type, webview_id, url, name, value); - }, - WebStorageThreadMsg::GetItem(sender, storage_type, webview_id, url, name) => { - self.request_item(sender, storage_type, webview_id, url, name) - }, - WebStorageThreadMsg::RemoveItem(sender, storage_type, webview_id, url, name) => { - self.remove_item(sender, storage_type, webview_id, url, name); - }, - WebStorageThreadMsg::Clear(sender, storage_type, webview_id, url) => { - self.clear(sender, storage_type, webview_id, url); - }, - WebStorageThreadMsg::Clone { - sender, - src: src_webview_id, - dest: dest_webview_id, - } => { - self.clone(src_webview_id, dest_webview_id); - let _ = sender.send(()); - }, - WebStorageThreadMsg::CollectMemoryReport(sender) => { - let reports = self.collect_memory_reports(); - sender.send(ProcessReports::new(reports)); - }, - WebStorageThreadMsg::Exit(sender) => { - // Nothing to do since we save localstorage set eagerly. 
- let _ = sender.send(()); - break; - }, - } - } - } - - fn collect_memory_reports(&self) -> Vec { - let mut reports = vec![]; - perform_memory_report(|ops| { - reports.push(Report { - path: path!["storage", "local"], - kind: ReportKind::ExplicitJemallocHeapSize, - size: self.environments.size_of(ops), - }); - - reports.push(Report { - path: path!["storage", "session"], - kind: ReportKind::ExplicitJemallocHeapSize, - size: self.session_data.size_of(ops), - }); - }); - reports - } - - fn get_origin_location(&self, origin: &ImmutableOrigin) -> Option { - match &self.config_dir { - Some(config_dir) => { - const NAMESPACE_SERVO_WEBSTORAGE: &uuid::Uuid = &Uuid::from_bytes([ - 0x37, 0x9e, 0x56, 0xb0, 0x1a, 0x76, 0x44, 0xc5, 0xa4, 0xdb, 0xe2, 0x18, 0xc5, - 0xc8, 0xa3, 0x5d, - ]); - let origin_uuid = Uuid::new_v5( - NAMESPACE_SERVO_WEBSTORAGE, - origin.ascii_serialization().as_bytes(), - ); - Some(config_dir.join("webstorage").join(origin_uuid.to_string())) - }, - None => None, - } - } - - fn get_environment( - &mut self, - origin: &ImmutableOrigin, - ) -> &WebStorageEnvironment { - if self.environments.contains_key(origin) { - return self.environments.get(origin).unwrap(); - } - - let origin_location = self.get_origin_location(origin); - - let engine = SqliteEngine::new(&origin_location, self.thread_pool.clone()).unwrap(); - let environment = WebStorageEnvironment::new(engine); - self.environments.insert(origin.clone(), environment); - self.environments.get(origin).unwrap() - } - - fn get_environment_mut( - &mut self, - origin: &ImmutableOrigin, - ) -> &mut WebStorageEnvironment { - if self.environments.contains_key(origin) { - return self.environments.get_mut(origin).unwrap(); - } - - let origin_location = self.get_origin_location(origin); - - let engine = SqliteEngine::new(&origin_location, self.thread_pool.clone()).unwrap(); - let environment = WebStorageEnvironment::new(engine); - self.environments.insert(origin.clone(), environment); - self.environments.get_mut(origin).unwrap() - } - - fn select_data( - &mut self, - storage_type: StorageType, - webview_id: WebViewId, - origin: ImmutableOrigin, - ) -> Option<&OriginEntry> { - match storage_type { - StorageType::Session => self - .session_data - .get(&webview_id) - .and_then(|origin_map| origin_map.get(&origin)), - StorageType::Local => Some(&self.get_environment(&origin).data), - } - } - - fn select_data_mut( - &mut self, - storage_type: StorageType, - webview_id: WebViewId, - origin: ImmutableOrigin, - ) -> Option<&mut OriginEntry> { - match storage_type { - StorageType::Session => self - .session_data - .get_mut(&webview_id) - .and_then(|origin_map| origin_map.get_mut(&origin)), - StorageType::Local => Some(&mut self.get_environment_mut(&origin).data), - } - } - - fn ensure_data_mut( - &mut self, - storage_type: StorageType, - webview_id: WebViewId, - origin: ImmutableOrigin, - ) -> &mut OriginEntry { - match storage_type { - StorageType::Session => self - .session_data - .entry(webview_id) - .or_default() - .entry(origin) - .or_default(), - StorageType::Local => &mut self.get_environment_mut(&origin).data, - } - } - - fn length( - &mut self, - sender: GenericSender, - storage_type: StorageType, - webview_id: WebViewId, - url: ServoUrl, - ) { - let data = self.select_data(storage_type, webview_id, url.origin()); - sender - .send(data.map_or(0, |entry| entry.inner().len())) - .unwrap(); - } - - fn key( - &mut self, - sender: GenericSender>, - storage_type: StorageType, - webview_id: WebViewId, - url: ServoUrl, - index: u32, - ) { - let data 
= self.select_data(storage_type, webview_id, url.origin()); - let key = data - .and_then(|entry| entry.inner().keys().nth(index as usize)) - .cloned(); - sender.send(key).unwrap(); - } - - fn keys( - &mut self, - sender: GenericSender>, - storage_type: StorageType, - webview_id: WebViewId, - url: ServoUrl, - ) { - let data = self.select_data(storage_type, webview_id, url.origin()); - let keys = data.map_or(vec![], |entry| entry.inner().keys().cloned().collect()); - - sender.send(keys).unwrap(); - } - - /// Sends Ok(changed, Some(old_value)) in case there was a previous - /// value with the same key name but with different value name - /// otherwise sends Err(()) to indicate that the operation would result in - /// exceeding the quota limit - fn set_item( - &mut self, - sender: GenericSender), ()>>, - storage_type: StorageType, - webview_id: WebViewId, - url: ServoUrl, - name: String, - value: String, - ) { - let (this_storage_size, other_storage_size) = { - let local_data = self.select_data(StorageType::Local, webview_id, url.origin()); - let local_data_size = local_data.map_or(0, OriginEntry::size); - let session_data = self.select_data(StorageType::Session, webview_id, url.origin()); - let session_data_size = session_data.map_or(0, OriginEntry::size); - match storage_type { - StorageType::Local => (local_data_size, session_data_size), - StorageType::Session => (session_data_size, local_data_size), - } - }; - - let entry = self.ensure_data_mut(storage_type, webview_id, url.origin()); - - let mut new_total_size = this_storage_size + value.len(); - if let Some(old_value) = entry.inner().get(&name) { - new_total_size -= old_value.len(); - } else { - new_total_size += name.len(); - } - - let message = if (new_total_size + other_storage_size) > QUOTA_SIZE_LIMIT { - Err(()) - } else { - let result = - entry - .insert(name.clone(), value.clone()) - .map_or(Ok((true, None)), |old| { - if old == value { - Ok((false, None)) - } else { - Ok((true, Some(old))) - } - }); - let env = self.get_environment_mut(&url.origin()); - env.set(&name, &value); - result - }; - sender.send(message).unwrap(); - } - - fn request_item( - &mut self, - sender: GenericSender>, - storage_type: StorageType, - webview_id: WebViewId, - url: ServoUrl, - name: String, - ) { - let data = self.select_data(storage_type, webview_id, url.origin()); - sender - .send(data.and_then(|entry| entry.inner().get(&name)).cloned()) - .unwrap(); - } - - /// Sends Some(old_value) in case there was a previous value with the key name, otherwise sends None - fn remove_item( - &mut self, - sender: GenericSender>, - storage_type: StorageType, - webview_id: WebViewId, - url: ServoUrl, - name: String, - ) { - let data = self.select_data_mut(storage_type, webview_id, url.origin()); - let old_value = data.and_then(|entry| entry.remove(&name)); - sender.send(old_value).unwrap(); - let env = self.get_environment_mut(&url.origin()); - env.delete(&name); - } - - fn clear( - &mut self, - sender: GenericSender, - storage_type: StorageType, - webview_id: WebViewId, - url: ServoUrl, - ) { - let data = self.select_data_mut(storage_type, webview_id, url.origin()); - sender - .send(data.is_some_and(|entry| { - if !entry.inner().is_empty() { - entry.clear(); - true - } else { - false - } - })) - .unwrap(); - let env = self.get_environment_mut(&url.origin()); - env.clear(); - } - - fn clone(&mut self, src_webview_id: WebViewId, dest_webview_id: WebViewId) { - let Some(src_origin_entries) = self.session_data.get(&src_webview_id) else { - return; - }; - - let 
dest_origin_entries = src_origin_entries.clone(); - self.session_data - .insert(dest_webview_id, dest_origin_entries); - } -}
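
Illustrative sketch, not part of the patch: with idb_thread.rs and webstorage_thread.rs folded into their parent modules, code inside the storage crate reaches the factory traits directly at crate::indexeddb and crate::webstorage. The spawn_storage_threads helper below is hypothetical; its parameters mirror the Option<PathBuf> config directory and MemProfilerChan taken by the trait methods shown above.

use std::path::PathBuf;

use base::generic_channel::GenericSender;
use ipc_channel::ipc::IpcSender;
use profile_traits::mem::ProfilerChan as MemProfilerChan;
use storage_traits::indexeddb_thread::IndexedDBThreadMsg;
use storage_traits::webstorage_thread::WebStorageThreadMsg;

use crate::indexeddb::IndexedDBThreadFactory;
use crate::webstorage::WebStorageThreadFactory;

/// Hypothetical helper: spawns both storage manager threads ("IndexedDBManager"
/// and "WebStorageManager") and returns the channels used to talk to them.
fn spawn_storage_threads(
    config_dir: Option<PathBuf>,
    mem_profiler_chan: MemProfilerChan,
) -> (
    IpcSender<IndexedDBThreadMsg>,
    GenericSender<WebStorageThreadMsg>,
) {
    // Each factory spawns its manager thread and hands back its sender.
    let idb = <IpcSender<IndexedDBThreadMsg> as IndexedDBThreadFactory>::new(config_dir.clone());
    let webstorage = <GenericSender<WebStorageThreadMsg> as WebStorageThreadFactory>::new(
        config_dir,
        mem_profiler_chan,
    );
    (idb, webstorage)
}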