kernel/elf: Map user images with kernel protections only at first.

They're still mapped into the team address space and with user addresses,
of course, and we reset all their protections later on anyway. This allows
a number of arch_cpu_{enable|disable}_user_access() calls to be dropped.

Also adjust the name of ro/text segments to "rx" to match what
runtime_loader now does.

Tested with SMAP enabled; still works.
commit 26cf47386e
parent b21f31ea16
Author: Augustin Cavalier
Date:   2025-01-15 13:33:46 -05:00


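Before the diff, an illustration of the pattern the commit adopts: create the mapping with liberal protections so the loader can patch it, and only afterwards clamp it to the protections the segment actually requests. The following is a user-space analogue using plain POSIX mmap()/mprotect(), nothing Haiku-specific; the kernel-side equivalent maps with B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA first and resets the user protections after relocation.

#include <sys/mman.h>
#include <cstring>
#include <cstdio>

int main()
{
	const size_t kSize = 4096;

	// Step 1: map writable first -- the analogue of mapping the segment
	// with B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, no user bits yet.
	void* region = mmap(NULL, kSize, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED)
		return 1;

	// Step 2: "relocate" the contents while the mapping is still writable.
	memset(region, 0xc3, kSize);	// x86 'ret' opcodes, as a stand-in

	// Step 3: only now apply the final protections (read + execute,
	// like an "rx" segment).
	if (mprotect(region, kSize, PROT_READ | PROT_EXEC) != 0)
		return 1;

	printf("segment re-protected at %p\n", region);
	return 0;
}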
@@ -1954,8 +1954,9 @@ elf_load_user_image(const char *path, Team *team, uint32 flags, addr_t *entry)
 
 			id = vm_map_file(team->id, regionName, (void **)&regionAddress,
 				addressSpec, fileUpperBound,
-				B_READ_AREA | B_WRITE_AREA, REGION_PRIVATE_MAP, false,
-				fd, ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
+				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
+				REGION_PRIVATE_MAP, false, fd,
+				ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
 			if (id < B_OK) {
 				dprintf("error mapping file data: %s!\n", strerror(id));
 				return B_NOT_AN_EXECUTABLE;
@@ -1976,9 +1977,7 @@ elf_load_user_image(const char *path, Team *team, uint32 flags, addr_t *entry)
 				size_t amount = fileUpperBound
 					- (programHeaders[i].p_vaddr % B_PAGE_SIZE)
 					- (programHeaders[i].p_filesz);
-				arch_cpu_enable_user_access();
 				memset((void *)start, 0, amount);
-				arch_cpu_disable_user_access();
 
 			// Check if we need extra storage for the bss - we have to do this if
 			// the above region doesn't already comprise the memory size, too.
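To make the amount arithmetic concrete, here is a worked example with invented numbers (B_PAGE_SIZE taken as 4096; the p_vaddr and p_filesz values are arbitrary). The computation zeroes exactly the tail of the last file-backed page, past the bytes mmap brought in from the file:

#include <cstdint>
#include <cstdio>

int main()
{
	const uint64_t kPageSize = 4096;	// stand-in for B_PAGE_SIZE
	const uint64_t pageOffset = 0x234;	// p_vaddr % B_PAGE_SIZE (invented)
	const uint64_t fileSize = 0x1500;	// p_filesz (invented)

	// fileUpperBound = ROUNDUP(pageOffset + fileSize, kPageSize)
	const uint64_t fileUpperBound =
		(pageOffset + fileSize + kPageSize - 1) / kPageSize * kPageSize;

	// The memset() in the hunk above clears exactly this many bytes.
	const uint64_t amount = fileUpperBound - pageOffset - fileSize;
	printf("fileUpperBound = %#llx, amount = %#llx\n",
		(unsigned long long)fileUpperBound, (unsigned long long)amount);
	// -> fileUpperBound = 0x2000, amount = 0x8cc
	return 0;
}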
@@ -1994,7 +1993,7 @@ elf_load_user_image(const char *path, Team *team, uint32 flags, addr_t *entry)
 			virtualRestrictions.address_specification = B_EXACT_ADDRESS;
 			physical_address_restrictions physicalRestrictions = {};
 			id = create_area_etc(team->id, regionName, bssSize, B_NO_LOCK,
-				B_READ_AREA | B_WRITE_AREA, 0, 0, &virtualRestrictions,
+				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0, &virtualRestrictions,
 				&physicalRestrictions, (void**)&regionAddress);
 			if (id < B_OK) {
 				dprintf("error allocating bss area: %s!\n", strerror(id));
@@ -2003,14 +2002,15 @@ elf_load_user_image(const char *path, Team *team, uint32 flags, addr_t *entry)
 			}
 		} else {
 			// assume ro/text segment
-			snprintf(regionName, B_OS_NAME_LENGTH, "%s_seg%dro", baseName, i);
+			snprintf(regionName, B_OS_NAME_LENGTH, "%s_seg%drx", baseName, i);
 
 			size_t segmentSize = ROUNDUP(programHeaders[i].p_memsz
 				+ (programHeaders[i].p_vaddr % B_PAGE_SIZE), B_PAGE_SIZE);
 
 			id = vm_map_file(team->id, regionName, (void **)&regionAddress,
 				addressSpec, segmentSize,
-				B_READ_AREA | B_WRITE_AREA, REGION_PRIVATE_MAP, false, fd,
+				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
+				REGION_PRIVATE_MAP, false, fd,
 				ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
 			if (id < B_OK) {
 				dprintf("error mapping file text: %s!\n", strerror(id));
@@ -2038,20 +2038,13 @@ elf_load_user_image(const char *path, Team *team, uint32 flags, addr_t *entry)
 	// modify the dynamic ptr by the delta of the regions
 	image->dynamic_section += image->text_region.delta;
 
-	arch_cpu_enable_user_access();
 	status = elf_parse_dynamic_section(image);
-	if (status != B_OK) {
-		arch_cpu_disable_user_access();
+	if (status != B_OK)
 		return status;
-	}
 
 	status = elf_relocate(image, image);
-	if (status != B_OK) {
-		arch_cpu_disable_user_access();
+	if (status != B_OK)
 		return status;
-	}
-
-	arch_cpu_disable_user_access();
 
 	// set correct area protection
 	for (int i = 0; i < elfHeader.e_phnum; i++) {
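For context on the dropped calls: with SMAP enabled, supervisor-mode accesses to user-accessible pages fault unless explicitly permitted, which is why the old code had to bracket every access to the image. On x86-64, such toggles boil down to roughly the following (an assumed, simplified illustration, not Haiku's actual arch_cpu_* implementation; these instructions are kernel-mode only):

// Assumed illustration: STAC/CLAC set/clear EFLAGS.AC, the flag that
// CR4.SMAP uses to gate supervisor access to user-accessible pages.
static inline void enable_user_access()
{
	asm volatile("stac" ::: "cc");
}

static inline void disable_user_access()
{
	asm volatile("clac" ::: "cc");
}

Since the image is now mapped with kernel protections during load, elf_parse_dynamic_section() and elf_relocate() operate on kernel-accessible memory, so the error paths no longer carry a cleanup obligation, hence the simpler early returns.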
@@ -2059,7 +2052,6 @@ elf_load_user_image(const char *path, Team *team, uint32 flags, addr_t *entry)
 			continue;
 
 		uint32 protection = 0;
-
 		if (programHeaders[i].p_flags & PF_EXECUTE)
 			protection |= B_EXECUTE_AREA;
 		if (programHeaders[i].p_flags & PF_WRITE)
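The loop continues past this excerpt and applies the accumulated protection to the mapped area. As a self-contained restatement of the flag translation it performs (the constant values below are placeholders for illustration, not Haiku's actual definitions):

#include <cstdint>

// ELF segment flag bits, per the ELF specification (PF_X, PF_W, PF_R).
constexpr uint32_t PF_EXECUTE = 1;
constexpr uint32_t PF_WRITE = 2;
constexpr uint32_t PF_READ = 4;

// Haiku user-area protection bits (placeholder values).
constexpr uint32_t B_READ_AREA = 1 << 0;
constexpr uint32_t B_WRITE_AREA = 1 << 1;
constexpr uint32_t B_EXECUTE_AREA = 1 << 2;

// Translate a program header's p_flags into the user protections that
// replace the temporary kernel-only ones once loading is finished.
uint32_t areaProtectionFor(uint32_t pFlags)
{
	uint32_t protection = 0;
	if (pFlags & PF_EXECUTE)
		protection |= B_EXECUTE_AREA;
	if (pFlags & PF_WRITE)
		protection |= B_WRITE_AREA;
	if (pFlags & PF_READ)
		protection |= B_READ_AREA;
	return protection;
}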