mirror of
https://github.com/LadybirdBrowser/ladybird
synced 2026-04-25 17:25:08 +02:00
LibHTTP: Short-circuit CacheIndex eviction when under its size limit
remove_entries_exceeding_cache_limit() is called after every network response, but the cache is usually still under budget and nothing needs to be evicted. Every one of those calls currently still runs the window-function eviction SQL over the whole CacheIndex table just to conclude there is nothing to do.

Short-circuit the call when the cache is already within its configured size limit. To make that check cheap, maintain m_total_estimated_size as a running total of the cache's estimated byte size, so the no-op case becomes a single u64 compare and the DB is only touched when there is real work.

Bookkeeping:
- Seed the total in CacheIndex::create() via a new select_total_estimated_size statement (COALESCE(..., 0) so an empty index returns 0 rather than NULL).
- Each Entry caches serialized_request_headers_size and serialized_response_headers_size so we don't re-serialize to recompute its footprint; Entry::estimated_size() centralizes the arithmetic.
- create_entry() adds the new entry's size. Any row it displaces is removed via DELETE ... RETURNING so the total stays accurate even for entries that were never loaded into m_entries.
- remove_entry() and the bulk DELETE statements were extended with the same RETURNING clause for the same reason.
- update_response_headers() shifts the total by the signed delta between the old and new serialized header sizes.

Also COALESCEs estimate_cache_size_accessed_since over an empty table to 0 so callers don't have to special-case NULL.
This commit is contained in:
committed by
Alexander Kalenik
parent
d8b28a68cc
commit
14dc9e8ca2
Notes:
github-actions[bot]
2026-04-18 23:32:34 +00:00
Author: https://github.com/kalenikaliaksandr
Commit: https://github.com/LadybirdBrowser/ladybird/commit/14dc9e8ca2c
Pull-request: https://github.com/LadybirdBrowser/ladybird/pull/8965
Reviewed-by: https://github.com/gmta ✅
Reviewed-by: https://github.com/trflynn89
@@ -98,8 +98,16 @@ ErrorOr<CacheIndex> CacheIndex::create(Database::Database& database, LexicalPath
|
||||
|
||||
Statements statements {};
|
||||
statements.insert_entry = TRY(database.prepare_statement("INSERT OR REPLACE INTO CacheIndex VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);"sv));
|
||||
statements.remove_entry = TRY(database.prepare_statement("DELETE FROM CacheIndex WHERE cache_key = ? AND vary_key = ?;"sv));
|
||||
statements.remove_entries_accessed_since = TRY(database.prepare_statement("DELETE FROM CacheIndex WHERE last_access_time >= ? RETURNING cache_key, vary_key;"sv));
|
||||
statements.remove_entry = TRY(database.prepare_statement(R"#(
|
||||
DELETE FROM CacheIndex
|
||||
WHERE cache_key = ? AND vary_key = ?
|
||||
RETURNING data_size + OCTET_LENGTH(request_headers) + OCTET_LENGTH(response_headers);
|
||||
)#"sv));
|
||||
statements.remove_entries_accessed_since = TRY(database.prepare_statement(R"#(
|
||||
DELETE FROM CacheIndex
|
||||
WHERE last_access_time >= ?
|
||||
RETURNING cache_key, vary_key, data_size + OCTET_LENGTH(request_headers) + OCTET_LENGTH(response_headers);
|
||||
)#"sv));
|
||||
statements.select_entries = TRY(database.prepare_statement("SELECT * FROM CacheIndex WHERE cache_key = ?;"sv));
|
||||
statements.update_response_headers = TRY(database.prepare_statement("UPDATE CacheIndex SET response_headers = ? WHERE cache_key = ? AND vary_key = ?;"sv));
|
||||
statements.update_last_access_time = TRY(database.prepare_statement("UPDATE CacheIndex SET last_access_time = ? WHERE cache_key = ? AND vary_key = ?;"sv));
|
||||
@@ -120,15 +128,20 @@ ErrorOr<CacheIndex> CacheIndex::create(Database::Database& database, LexicalPath
|
||||
FROM RankedCacheIndex
|
||||
WHERE cumulative_estimated_size > ?
|
||||
)
|
||||
RETURNING cache_key, vary_key;
|
||||
RETURNING cache_key, vary_key, data_size + OCTET_LENGTH(request_headers) + OCTET_LENGTH(response_headers);
|
||||
)#"sv));
|
||||
|
||||
statements.estimate_cache_size_accessed_since = TRY(database.prepare_statement(R"#(
|
||||
SELECT SUM(data_size + OCTET_LENGTH(request_headers) + OCTET_LENGTH(response_headers))
|
||||
SELECT COALESCE(SUM(data_size + OCTET_LENGTH(request_headers) + OCTET_LENGTH(response_headers)), 0)
|
||||
FROM CacheIndex
|
||||
WHERE last_access_time >= ?;
|
||||
)#"sv));
|
||||
|
||||
statements.select_total_estimated_size = TRY(database.prepare_statement(R"#(
|
||||
SELECT COALESCE(SUM(data_size + OCTET_LENGTH(request_headers) + OCTET_LENGTH(response_headers)), 0)
|
||||
FROM CacheIndex;
|
||||
)#"sv));
|
||||
|
||||
auto disk_space = TRY(FileSystem::compute_disk_space(cache_directory));
|
||||
auto maximum_disk_cache_size = compute_maximum_disk_cache_size(disk_space.free_bytes);
|
||||
|
||||
@@ -138,13 +151,19 @@ ErrorOr<CacheIndex> CacheIndex::create(Database::Database& database, LexicalPath
|
||||
.maximum_disk_cache_entry_size = compute_maximum_disk_cache_entry_size(maximum_disk_cache_size),
|
||||
};
|
||||
|
||||
return CacheIndex { database, statements, limits };
|
||||
u64 total_estimated_size { 0 };
|
||||
database.execute_statement(
|
||||
statements.select_total_estimated_size,
|
||||
[&](auto statement_id) { total_estimated_size = database.result_column<u64>(statement_id, 0); });
|
||||
|
||||
return CacheIndex { database, statements, limits, total_estimated_size };
|
||||
}
|
||||
|
||||
CacheIndex::CacheIndex(Database::Database& database, Statements statements, Limits limits)
|
||||
CacheIndex::CacheIndex(Database::Database& database, Statements statements, Limits limits, u64 total_estimated_size)
|
||||
: m_database(database)
|
||||
, m_statements(statements)
|
||||
, m_limits(limits)
|
||||
, m_total_estimated_size(total_estimated_size)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -164,19 +183,31 @@ ErrorOr<void> CacheIndex::create_entry(u64 cache_key, u64 vary_key, String url,
|
||||
auto serialized_request_headers = serialize_headers(request_headers);
|
||||
auto serialized_response_headers = serialize_headers(response_headers);
|
||||
|
||||
if (data_size + serialized_request_headers.length() + serialized_response_headers.length() > m_limits.maximum_disk_cache_entry_size)
|
||||
return Error::from_string_literal("Cache entry size exceeds allowed maximum");
|
||||
|
||||
Entry entry {
|
||||
.vary_key = vary_key,
|
||||
.url = move(url),
|
||||
.request_headers = move(request_headers),
|
||||
.response_headers = move(response_headers),
|
||||
.data_size = data_size,
|
||||
.serialized_request_headers_size = static_cast<u64>(serialized_request_headers.length()),
|
||||
.serialized_response_headers_size = static_cast<u64>(serialized_response_headers.length()),
|
||||
.request_time = request_time,
|
||||
.response_time = response_time,
|
||||
.last_access_time = now,
|
||||
};
|
||||
auto entry_size = entry.estimated_size();
|
||||
|
||||
if (entry_size > m_limits.maximum_disk_cache_entry_size)
|
||||
return Error::from_string_literal("Cache entry size exceeds allowed maximum");
|
||||
|
||||
m_database->execute_statement(
|
||||
m_statements.remove_entry,
|
||||
[&](auto statement_id) {
|
||||
auto removed_size = m_database->result_column<u64>(statement_id, 0);
|
||||
m_total_estimated_size -= removed_size;
|
||||
},
|
||||
cache_key,
|
||||
vary_key);
|
||||
|
||||
auto& entries = m_entries.ensure(cache_key);
|
||||
auto existing_entry_index = entries.find_first_index_if([&](auto const& existing_entry) {
|
||||
@@ -190,22 +221,37 @@ ErrorOr<void> CacheIndex::create_entry(u64 cache_key, u64 vary_key, String url,
|
||||
else
|
||||
entries.append(move(entry));
|
||||
|
||||
m_total_estimated_size += entry_size;
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
void CacheIndex::remove_entry(u64 cache_key, u64 vary_key)
|
||||
{
|
||||
m_database->execute_statement(m_statements.remove_entry, {}, cache_key, vary_key);
|
||||
m_database->execute_statement(
|
||||
m_statements.remove_entry,
|
||||
[&](auto statement_id) {
|
||||
auto removed_size = m_database->result_column<u64>(statement_id, 0);
|
||||
m_total_estimated_size -= removed_size;
|
||||
},
|
||||
cache_key,
|
||||
vary_key);
|
||||
|
||||
delete_entry(cache_key, vary_key);
|
||||
}
|
||||
|
||||
void CacheIndex::remove_entries_exceeding_cache_limit(Function<void(u64 cache_key, u64 vary_key)> on_entry_removed)
|
||||
{
|
||||
if (m_total_estimated_size <= m_limits.maximum_disk_cache_size)
|
||||
return;
|
||||
|
||||
m_database->execute_statement(
|
||||
m_statements.remove_entries_exceeding_cache_limit,
|
||||
[&](auto statement_id) {
|
||||
auto cache_key = m_database->result_column<u64>(statement_id, 0);
|
||||
auto vary_key = m_database->result_column<u64>(statement_id, 1);
|
||||
auto removed_size = m_database->result_column<u64>(statement_id, 2);
|
||||
m_total_estimated_size -= removed_size;
|
||||
delete_entry(cache_key, vary_key);
|
||||
|
||||
if (on_entry_removed)
|
||||
@@ -221,6 +267,8 @@ void CacheIndex::remove_entries_accessed_since(UnixDateTime since, Function<void
|
||||
[&](auto statement_id) {
|
||||
auto cache_key = m_database->result_column<u64>(statement_id, 0);
|
||||
auto vary_key = m_database->result_column<u64>(statement_id, 1);
|
||||
auto removed_size = m_database->result_column<u64>(statement_id, 2);
|
||||
m_total_estimated_size -= removed_size;
|
||||
delete_entry(cache_key, vary_key);
|
||||
|
||||
if (on_entry_removed)
|
||||
@@ -235,8 +283,15 @@ void CacheIndex::update_response_headers(u64 cache_key, u64 vary_key, NonnullRef
|
||||
if (!entry.has_value())
|
||||
return;
|
||||
|
||||
m_database->execute_statement(m_statements.update_response_headers, {}, serialize_headers(response_headers), cache_key, vary_key);
|
||||
auto serialized_response_headers = serialize_headers(response_headers);
|
||||
auto serialized_response_headers_size = static_cast<u64>(serialized_response_headers.length());
|
||||
|
||||
m_database->execute_statement(m_statements.update_response_headers, {}, serialized_response_headers, cache_key, vary_key);
|
||||
|
||||
m_total_estimated_size -= entry->serialized_response_headers_size;
|
||||
m_total_estimated_size += serialized_response_headers_size;
|
||||
entry->response_headers = move(response_headers);
|
||||
entry->serialized_response_headers_size = serialized_response_headers_size;
|
||||
}
|
||||
|
||||
void CacheIndex::update_last_access_time(u64 cache_key, u64 vary_key)
|
||||
@@ -270,7 +325,7 @@ Optional<CacheIndex::Entry const&> CacheIndex::find_entry(u64 cache_key, HeaderL
|
||||
auto response_time = m_database->result_column<UnixDateTime>(statement_id, column++);
|
||||
auto last_access_time = m_database->result_column<UnixDateTime>(statement_id, column++);
|
||||
|
||||
entries.empend(vary_key, move(url), deserialize_headers(request_headers), deserialize_headers(response_headers), data_size, request_time, response_time, last_access_time);
|
||||
entries.empend(vary_key, move(url), deserialize_headers(request_headers), deserialize_headers(response_headers), data_size, request_headers.length(), response_headers.length(), request_time, response_time, last_access_time);
|
||||
},
|
||||
cache_key);
|
||||
|
||||
|
||||
@@ -27,10 +27,17 @@ class CacheIndex {
|
||||
NonnullRefPtr<HeaderList> request_headers;
|
||||
NonnullRefPtr<HeaderList> response_headers;
|
||||
u64 data_size { 0 };
|
||||
u64 serialized_request_headers_size { 0 };
|
||||
u64 serialized_response_headers_size { 0 };
|
||||
|
||||
UnixDateTime request_time;
|
||||
UnixDateTime response_time;
|
||||
UnixDateTime last_access_time;
|
||||
|
||||
u64 estimated_size() const
|
||||
{
|
||||
return data_size + serialized_request_headers_size + serialized_response_headers_size;
|
||||
}
|
||||
};
|
||||
|
||||
public:
|
||||
@@ -60,6 +67,7 @@ private:
|
||||
Database::StatementID update_response_headers { 0 };
|
||||
Database::StatementID update_last_access_time { 0 };
|
||||
Database::StatementID estimate_cache_size_accessed_since { 0 };
|
||||
Database::StatementID select_total_estimated_size { 0 };
|
||||
};
|
||||
|
||||
struct Limits {
|
||||
@@ -68,7 +76,7 @@ private:
|
||||
u64 maximum_disk_cache_entry_size { 0 };
|
||||
};
|
||||
|
||||
CacheIndex(Database::Database&, Statements, Limits);
|
||||
CacheIndex(Database::Database&, Statements, Limits, u64 total_estimated_size);
|
||||
|
||||
Optional<Entry&> get_entry(u64 cache_key, u64 vary_key);
|
||||
void delete_entry(u64 cache_key, u64 vary_key);
|
||||
@@ -79,6 +87,7 @@ private:
|
||||
HashMap<u64, Vector<Entry>, IdentityHashTraits<u64>> m_entries;
|
||||
|
||||
Limits m_limits;
|
||||
u64 m_total_estimated_size { 0 };
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user