Kernel/riscv64: Make the kernel bootable with the EFI Prekernel

The EFI Prekernel needs the kernel to be relocatable.

pre_init now needs to relocate itself so it still works when linking
without `--no-pie`.
This commit is contained in:
Sönke Holz
2024-10-12 21:29:48 +02:00
parent 788f598871
commit c0d70a6e24
5 changed files with 72 additions and 30 deletions

View File

@@ -4,26 +4,24 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
ENTRY(start)
ENTRY(init)
#define PF_X 0x1
#define PF_W 0x2
#define PF_R 0x4
KERNEL_MAPPING_BASE = 0x2000000000;
PHDRS
{
text PT_LOAD FLAGS(PF_R | PF_X);
data PT_LOAD FLAGS(PF_R | PF_W);
ksyms PT_LOAD FLAGS(PF_R);
bss PT_LOAD FLAGS(PF_R | PF_W);
dynamic_segment PT_LOAD FLAGS(PF_R | PF_W);
dynamic PT_DYNAMIC FLAGS(PF_R | PF_W);
ksyms PT_LOAD FLAGS(PF_R | PF_W);
}
SECTIONS
{
. = KERNEL_MAPPING_BASE;
start_of_kernel_image = .;
.text ALIGN(4K) :
@@ -84,13 +82,6 @@ SECTIONS
end_of_ro_after_init = .;
} :data
.ksyms ALIGN(4K) :
{
start_of_kernel_ksyms = .;
*(.kernel_symbols)
end_of_kernel_ksyms = .;
} :ksyms
/* The bss has to be in its own program header so the prekernel doesn't have to copy segments */
.bss ALIGN(4K) (NOLOAD) :
{
@@ -102,6 +93,18 @@ SECTIONS
*(.heap)
} :bss
.dynamic ALIGN(4K) :
{
*(.dynamic)
} :dynamic_segment :dynamic
.ksyms ALIGN(4K) :
{
start_of_kernel_ksyms = .;
*(.kernel_symbols)
end_of_kernel_ksyms = .;
} :ksyms
. = ALIGN(4K);
start_of_initial_stack = .;

View File

@@ -11,6 +11,8 @@
#include <Kernel/Arch/riscv64/SBI.h>
#include <Kernel/Sections.h>
#include <LibELF/Relocation.h>
namespace Kernel {
UNMAP_AFTER_INIT void dbgln_without_mmu(StringView message)
@@ -43,8 +45,37 @@ UNMAP_AFTER_INIT void dbgln_without_mmu(StringView message)
panic_without_mmu("Unexpected trap"sv);
}
// Returns the physical address the kernel image was actually loaded at.
// `lla` expands to a PC-relative auipc/addi pair, so the address of
// start_of_kernel_image is computed relative to where we are running,
// not via the GOT — this works while still identity mapped, before any
// relocations have been applied.
static UNMAP_AFTER_INIT PhysicalPtr physical_load_base()
{
PhysicalPtr physical_load_base;
asm volatile(
"lla %[physical_load_base], start_of_kernel_image\n"
: [physical_load_base] "=r"(physical_load_base));
return physical_load_base;
}
// Returns the physical address of the ELF .dynamic section (_DYNAMIC),
// which perform_relative_relocations() walks to find the relocation tables.
static UNMAP_AFTER_INIT PhysicalPtr dynamic_section_addr()
{
PhysicalPtr dynamic_section_addr;
// Use lla explicitly to prevent a GOT load: GOT entries hold link-time
// addresses until relocations are applied, so they are unusable here.
asm volatile(
"lla %[dynamic_section_addr], _DYNAMIC\n"
: [dynamic_section_addr] "=r"(dynamic_section_addr));
return dynamic_section_addr;
}
extern "C" [[noreturn]] UNMAP_AFTER_INIT void pre_init(FlatPtr boot_hart_id, PhysicalPtr flattened_devicetree_paddr)
{
// Apply relative relocations as if we were running at KERNEL_MAPPING_BASE.
// This means that all global variables must be accessed with adjust_by_mapping_base, since we are still running identity mapped.
// Otherwise, we would have to relocate twice: once while running identity mapped, and again when we enable the MMU.
if (!ELF::perform_relative_relocations(physical_load_base(), KERNEL_MAPPING_BASE, dynamic_section_addr()))
panic_without_mmu("Failed to perform relative relocations"sv);
// Catch traps in pre_init
RISCV64::CSR::write(RISCV64::CSR::Address::STVEC, bit_cast<FlatPtr>(&early_trap_handler));

View File

@@ -616,6 +616,7 @@ set(EDID_SOURCES
set(ELF_SOURCES
../Userland/Libraries/LibELF/Image.cpp
../Userland/Libraries/LibELF/Relocation.cpp
../Userland/Libraries/LibELF/Validation.cpp
)
@@ -837,7 +838,7 @@ elseif("${SERENITY_ARCH}" STREQUAL "riscv64")
# The final kernel binary for some reason includes temporary local symbols on riscv64 clang, which causes kernel.map to be too big to fit in its section in the kernel.
# Explicitly pass -X to the linker to remove them.
target_link_options(Kernel PRIVATE LINKER:-T ${CMAKE_CURRENT_BINARY_DIR}/linker.ld -nostdlib LINKER:--no-pie LINKER:-X)
target_link_options(Kernel PRIVATE LINKER:-T ${CMAKE_CURRENT_BINARY_DIR}/linker.ld -nostdlib LINKER:-X)
set_target_properties(Kernel PROPERTIES LINK_DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/linker.ld")
elseif ("${SERENITY_ARCH}" STREQUAL "x86_64")

View File

@@ -10,19 +10,8 @@
namespace ELF {
[[gnu::no_stack_protector]] bool perform_relative_relocations(FlatPtr base_address)
[[gnu::no_stack_protector]] bool perform_relative_relocations(FlatPtr base_address, FlatPtr runtime_base_address, FlatPtr dynamic_section_addr)
{
Elf_Ehdr* header = (Elf_Ehdr*)(base_address);
Elf_Phdr* pheader = (Elf_Phdr*)(base_address + header->e_phoff);
FlatPtr dynamic_section_addr = 0;
for (size_t i = 0; i < (size_t)header->e_phnum; ++i, ++pheader) {
if (pheader->p_type != PT_DYNAMIC)
continue;
dynamic_section_addr = pheader->p_vaddr + base_address;
}
if (!dynamic_section_addr)
return false;
FlatPtr relocation_section_addr = 0;
size_t relocation_table_size = 0;
size_t relocation_count = 0;
@@ -63,18 +52,18 @@ namespace ELF {
auto* patch_address = (FlatPtr*)(base_address + relocation->r_offset);
FlatPtr relocated_address;
if (use_addend) {
relocated_address = base_address + relocation->r_addend;
relocated_address = runtime_base_address + relocation->r_addend;
} else {
__builtin_memcpy(&relocated_address, patch_address, sizeof(relocated_address));
relocated_address += base_address;
relocated_address += runtime_base_address;
}
__builtin_memcpy(patch_address, &relocated_address, sizeof(relocated_address));
}
auto patch_relr = [base_address](FlatPtr* patch_ptr) {
auto patch_relr = [runtime_base_address](FlatPtr* patch_ptr) {
FlatPtr relocated_address;
__builtin_memcpy(&relocated_address, patch_ptr, sizeof(FlatPtr));
relocated_address += base_address;
relocated_address += runtime_base_address;
__builtin_memcpy(patch_ptr, &relocated_address, sizeof(FlatPtr));
};
@@ -97,4 +86,21 @@ namespace ELF {
}
return true;
}
// Convenience overload for callers that run at their final load address:
// locates the PT_DYNAMIC segment via the ELF headers at base_address and
// applies relative relocations with runtime base == load base.
// Returns false if no PT_DYNAMIC segment is found.
[[gnu::no_stack_protector]] bool perform_relative_relocations(FlatPtr base_address)
{
Elf_Ehdr* header = (Elf_Ehdr*)(base_address);
Elf_Phdr* pheader = (Elf_Phdr*)(base_address + header->e_phoff);
FlatPtr dynamic_section_addr = 0;
// Scan all program headers; if multiple PT_DYNAMIC segments exist
// (they shouldn't, per the ELF spec), the last one wins.
for (size_t i = 0; i < (size_t)header->e_phnum; ++i, ++pheader) {
if (pheader->p_type != PT_DYNAMIC)
continue;
dynamic_section_addr = pheader->p_vaddr + base_address;
}
if (!dynamic_section_addr)
return false;
// Delegate to the general form: link-time and runtime base coincide here.
return perform_relative_relocations(base_address, base_address, dynamic_section_addr);
}
}

View File

@@ -10,6 +10,7 @@
namespace ELF {
bool perform_relative_relocations(FlatPtr base_address, FlatPtr runtime_base_address, FlatPtr dynamic_section_addr);
bool perform_relative_relocations(FlatPtr base_address);
}