runtime_loader: Consolidate some elf_region code.

No functional change intended.

commit b21f31ea16
parent e38be9392a
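The consolidation replaces repeated `image->regions[regcount].<field>` and `image->regions[i].<field>` expressions with a local `elf_region_t&` reference, so fields shared by both branches are assigned once before the `if`/`else`. A minimal sketch of the idiom, not Haiku code — `Region` and `Image` below are stand-ins for `elf_region_t` and `image_t`:

```cpp
// Minimal sketch of the idiom applied in this commit (not Haiku code).
#include <cstdint>
#include <cstdio>

struct Region {                 // stand-in for elf_region_t
	uintptr_t start = 0;
	size_t size = 0;
	uint32_t flags = 0;
};

struct Image {                  // stand-in for image_t
	Region regions[2];
};

int main()
{
	Image image;
	int regcount = 0;

	// Before: image.regions[regcount].start = ..., repeated for every field.
	// After: bind a reference once, then use plain field assignments.
	Region& region = image.regions[regcount];
	region.start = 0x1000;
	region.size = 0x2000;
	region.flags = 0x1;

	printf("region %d: start=%#zx size=%#zx flags=%#x\n", regcount,
		(size_t)region.start, region.size, (unsigned)region.flags);
	return 0;
}
```

Binding the reference also makes the split case below read more directly, e.g. `regionB.vmstart = region.vmstart + region.vmsize` instead of indexing `regions[regcount-1]` twice.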
@@ -131,64 +131,46 @@ parse_program_headers(image_t* image, char* buff, int phnum, int phentsize)
 					flags |= RFLAG_EXECUTABLE;
 				}
 
+				elf_region_t& region = image->regions[regcount];
+				region.start = pheader->p_vaddr;
+				region.vmstart = PAGE_BASE(pheader->p_vaddr);
+				region.fdstart = pheader->p_offset;
+				region.fdsize = pheader->p_filesz;
+				region.delta = 0;
+				region.flags = flags;
+
 				if (pheader->p_memsz == pheader->p_filesz) {
-					/*
-					 * everything in one area
-					 */
-					image->regions[regcount].start = pheader->p_vaddr;
-					image->regions[regcount].size = pheader->p_memsz;
-					image->regions[regcount].vmstart
-						= PAGE_BASE(pheader->p_vaddr);
-					image->regions[regcount].vmsize
-						= TO_PAGE_SIZE(pheader->p_memsz
-							+ PAGE_OFFSET(pheader->p_vaddr));
-					image->regions[regcount].fdstart = pheader->p_offset;
-					image->regions[regcount].fdsize = pheader->p_filesz;
-					image->regions[regcount].delta = 0;
-					image->regions[regcount].flags = flags;
+					// everything in one area
+					region.size = pheader->p_memsz;
+					region.vmsize = TO_PAGE_SIZE(pheader->p_memsz
+						+ PAGE_OFFSET(pheader->p_vaddr));
 				} else {
-					/*
-					 * may require splitting
-					 */
-					addr_t A = TO_PAGE_SIZE(pheader->p_vaddr
-						+ pheader->p_memsz);
-					addr_t B = TO_PAGE_SIZE(pheader->p_vaddr
-						+ pheader->p_filesz);
-
-					image->regions[regcount].start = pheader->p_vaddr;
-					image->regions[regcount].size = pheader->p_filesz;
-					image->regions[regcount].vmstart
-						= PAGE_BASE(pheader->p_vaddr);
-					image->regions[regcount].vmsize
-						= TO_PAGE_SIZE(pheader->p_filesz
-							+ PAGE_OFFSET(pheader->p_vaddr));
-					image->regions[regcount].fdstart = pheader->p_offset;
-					image->regions[regcount].fdsize = pheader->p_filesz;
-					image->regions[regcount].delta = 0;
-					image->regions[regcount].flags = flags;
+					// may require splitting
+					region.size = pheader->p_filesz;
+					region.vmsize = TO_PAGE_SIZE(pheader->p_filesz
+						+ PAGE_OFFSET(pheader->p_vaddr));
+
+					addr_t A = TO_PAGE_SIZE(pheader->p_vaddr + pheader->p_memsz);
+					addr_t B = TO_PAGE_SIZE(pheader->p_vaddr + pheader->p_filesz);
 					if (A != B) {
-						/*
-						 * yeah, it requires splitting
-						 */
-						regcount += 1;
-						image->regions[regcount].start = pheader->p_vaddr;
-						image->regions[regcount].size
-							= pheader->p_memsz - pheader->p_filesz;
-						image->regions[regcount].vmstart
-							= image->regions[regcount-1].vmstart
-							+ image->regions[regcount-1].vmsize;
-						image->regions[regcount].vmsize
-							= TO_PAGE_SIZE(pheader->p_memsz
-								+ PAGE_OFFSET(pheader->p_vaddr))
-							- image->regions[regcount-1].vmsize;
-						image->regions[regcount].fdstart = 0;
-						image->regions[regcount].fdsize = 0;
-						image->regions[regcount].delta = 0;
-						image->regions[regcount].flags = flags | RFLAG_ANON;
+						// yeah, it requires splitting
+						regcount++;
+						elf_region_t& regionB = image->regions[regcount];
+
+						regionB.start = pheader->p_vaddr;
+						regionB.size = pheader->p_memsz - pheader->p_filesz;
+						regionB.vmstart = region.vmstart + region.vmsize;
+						regionB.vmsize
+							= TO_PAGE_SIZE(pheader->p_memsz
+								+ PAGE_OFFSET(pheader->p_vaddr))
+							- region.vmsize;
+						regionB.fdstart = 0;
+						regionB.fdsize = 0;
+						regionB.delta = 0;
+						regionB.flags = flags | RFLAG_ANON;
 					}
 				}
-				regcount += 1;
+				regcount++;
 				break;
 			}
 			case PT_DYNAMIC:
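For context on the PT_LOAD handling above: when `p_memsz` exceeds `p_filesz`, the zero-initialized tail of the segment may spill past the last file-backed page, and in that case a second, anonymous region (`RFLAG_ANON`) is appended. A minimal sketch of that decision and of the size arithmetic, assuming 4 KiB pages and local stand-ins for `TO_PAGE_SIZE`/`PAGE_OFFSET` rather than the loader's actual macros:

```cpp
// Minimal sketch of the PT_LOAD split check (assumptions: 4 KiB pages, local
// reimplementations of the page macros; not the runtime_loader's own code).
#include <cstdint>
#include <cstdio>

using addr_t = uintptr_t;

static const addr_t kPageSize = 4096;
static addr_t page_offset(addr_t a)  { return a & (kPageSize - 1); }
static addr_t to_page_size(addr_t a) { return (a + kPageSize - 1) & ~(kPageSize - 1); }

int main()
{
	// Hypothetical segment: 0x1800 file-backed bytes, 0x4000 bytes in memory;
	// the difference is the zero-initialized (.bss) tail.
	addr_t p_vaddr = 0x201234, p_filesz = 0x1800, p_memsz = 0x4000;

	// The file-backed region covers the file contents, rounded to whole pages.
	addr_t vmsize = to_page_size(p_filesz + page_offset(p_vaddr));

	// A split is needed only if the in-memory end lands on a later page than
	// the file-backed end -- otherwise the last mapped page already covers it.
	addr_t A = to_page_size(p_vaddr + p_memsz);
	addr_t B = to_page_size(p_vaddr + p_filesz);
	if (A != B) {
		addr_t anonVmsize
			= to_page_size(p_memsz + page_offset(p_vaddr)) - vmsize;
		printf("split: file-backed %#zx bytes + anonymous %#zx bytes\n",
			(size_t)vmsize, (size_t)anonVmsize);
	} else {
		printf("no split: %#zx file-backed bytes cover the whole segment\n",
			(size_t)vmsize);
	}
	return 0;
}
```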
@@ -347,11 +347,13 @@ map_image(int fd, char const* path, image_t* image, bool fixed)
 		return B_NO_MEMORY;
 
 	for (uint32 i = 0; i < image->num_regions; i++) {
+		elf_region_t& region = image->regions[i];
+
 		char regionName[B_OS_NAME_LENGTH];
 		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
-			baseName, i, (image->regions[i].flags & RFLAG_EXECUTABLE) ?
-				((image->regions[i].flags & RFLAG_WRITABLE) ? "rwx" : "rx")
-				: (image->regions[i].flags & RFLAG_WRITABLE) ? "rw" : "ro");
+			baseName, i, (region.flags & RFLAG_EXECUTABLE) ?
+				((region.flags & RFLAG_WRITABLE) ? "rwx" : "rx")
+				: (region.flags & RFLAG_WRITABLE) ? "rw" : "ro");
 
 		get_image_region_load_address(image, i,
 			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
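The `snprintf` in the hunk above derives the area name from the image's base name, the segment index, and a permission suffix picked from the region flags. A minimal sketch of that naming, with assumed local values for the `RFLAG_*` bits and the name-buffer length instead of the Haiku headers:

```cpp
// Minimal sketch of the segment-name construction (RFLAG_* values and the
// buffer length are assumed here, not taken from the Haiku headers).
#include <cstdint>
#include <cstdio>

enum : uint32_t {
	RFLAG_WRITABLE   = 1 << 0,
	RFLAG_EXECUTABLE = 1 << 1,
};
static const size_t kOSNameLength = 32;     // stands in for B_OS_NAME_LENGTH

int main()
{
	const char* baseName = "libfoo.so";     // hypothetical image name
	unsigned i = 1;                         // segment index
	uint32_t flags = RFLAG_WRITABLE;        // e.g. a data segment

	char regionName[kOSNameLength];
	snprintf(regionName, sizeof(regionName), "%s_seg%u%s",
		baseName, i, (flags & RFLAG_EXECUTABLE) != 0
			? ((flags & RFLAG_WRITABLE) != 0 ? "rwx" : "rx")
			: (flags & RFLAG_WRITABLE) != 0 ? "rw" : "ro");

	printf("%s\n", regionName);             // prints "libfoo.so_seg1rw"
	return 0;
}
```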
@@ -362,15 +364,15 @@ map_image(int fd, char const* path, image_t* image, bool fixed)
 		if (addressSpecifier != B_EXACT_ADDRESS)
 			loadAddress = reservedAddress;
 
-		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
-			image->regions[i].id = _kern_create_area(regionName,
+		if ((region.flags & RFLAG_ANON) != 0) {
+			region.id = _kern_create_area(regionName,
 				(void**)&loadAddress, B_EXACT_ADDRESS,
-				image->regions[i].vmsize, B_NO_LOCK,
+				region.vmsize, B_NO_LOCK,
 				B_READ_AREA | B_WRITE_AREA);
 
-			if (image->regions[i].id < 0) {
+			if (region.id < 0) {
 				_kern_unreserve_address_range(reservedAddress, reservedSize);
-				return image->regions[i].id;
+				return region.id;
 			}
 		} else {
 			// Map all segments r/w first -- write access might be needed for
@@ -381,30 +383,30 @@ map_image(int fd, char const* path, image_t* image, bool fixed)
 			// of memory to be committed for them temporarily, just because we
 			// have to write map them.
 			uint32 protection = B_READ_AREA | B_WRITE_AREA
-				| ((image->regions[i].flags & RFLAG_WRITABLE) != 0
+				| ((region.flags & RFLAG_WRITABLE) != 0
 					? 0 : B_OVERCOMMITTING_AREA);
-			image->regions[i].id = _kern_map_file(regionName,
+			region.id = _kern_map_file(regionName,
 				(void**)&loadAddress, B_EXACT_ADDRESS,
-				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
-				fd, PAGE_BASE(image->regions[i].fdstart));
+				region.vmsize, protection, REGION_PRIVATE_MAP, false,
+				fd, PAGE_BASE(region.fdstart));
 
-			if (image->regions[i].id < 0) {
+			if (region.id < 0) {
 				_kern_unreserve_address_range(reservedAddress, reservedSize);
-				return image->regions[i].id;
+				return region.id;
 			}
 
 			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
-				(void *)loadAddress, image->regions[i].vmsize,
-				image->regions[i].flags & RFLAG_WRITABLE ? "rw" : "read-only"));
+				(void *)loadAddress, region.vmsize,
+				region.flags & RFLAG_WRITABLE ? "rw" : "read-only"));
 
 			// handle trailer bits in data segment
-			if (image->regions[i].flags & RFLAG_WRITABLE) {
+			if (region.flags & RFLAG_WRITABLE) {
 				addr_t startClearing = loadAddress
-					+ PAGE_OFFSET(image->regions[i].start)
-					+ image->regions[i].size;
-				addr_t toClear = image->regions[i].vmsize
-					- PAGE_OFFSET(image->regions[i].start)
-					- image->regions[i].size;
+					+ PAGE_OFFSET(region.start)
+					+ region.size;
+				addr_t toClear = region.vmsize
+					- PAGE_OFFSET(region.start)
+					- region.size;
 
 				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
 					startClearing, toClear));
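The "trailer bits" handling above zeroes whatever lies between the end of the file-backed data and the end of the mapping, so the part of the `.bss` that shares the segment's last file-backed page starts out zero-initialized. A minimal sketch of the `startClearing`/`toClear` arithmetic, with assumed 4 KiB pages, a local `PAGE_OFFSET` stand-in, and illustrative values:

```cpp
// Minimal sketch of the trailer-clearing arithmetic (assumptions: 4 KiB pages,
// local PAGE_OFFSET stand-in, made-up values; not the loader's own code).
#include <cstdint>
#include <cstdio>

using addr_t = uintptr_t;

static const addr_t kPageSize = 4096;
static addr_t page_offset(addr_t a) { return a & (kPageSize - 1); }

int main()
{
	// Hypothetical writable region after mapping.
	addr_t start = 0x201234;          // region.start (the segment's p_vaddr)
	addr_t size = 0x1800;             // region.size (p_filesz)
	addr_t vmsize = 0x2000;           // region.vmsize (page-rounded mapping)
	addr_t loadAddress = 0x30000000;  // where the pages ended up

	// Everything past the file-backed bytes, up to the end of the mapping,
	// must be zeroed; the real loader then clears that range (the clearing
	// itself lies outside the hunk shown above).
	addr_t startClearing = loadAddress + page_offset(start) + size;
	addr_t toClear = vmsize - page_offset(start) - size;

	printf("would clear %#zx bytes starting at %#zx\n",
		(size_t)toClear, (size_t)startClearing);
	return 0;
}
```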
@@ -412,8 +414,8 @@ map_image(int fd, char const* path, image_t* image, bool fixed)
 			}
 		}
 
-		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
-		image->regions[i].vmstart = loadAddress;
+		region.delta = loadAddress - region.vmstart;
+		region.vmstart = loadAddress;
 		if (i == 0) {
 			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
 				loadAddress);