
v8: implement VirtualMemory class on SunOS

Unbreaks build on SunOS. Re-applies 4908e5bf7a.
v0.7.4-release
Ben Noordhuis 13 years ago
parent
commit b9529545e1
1. deps/v8/src/platform-solaris.cc (141 changed lines)

@@ -54,6 +54,7 @@
 #include "platform.h"
 #include "vm-state-inl.h"
+#include "v8threads.h"
 
 
 // It seems there is a bug in some Solaris distributions (experienced in
@@ -83,6 +84,33 @@ namespace internal {
 static const pthread_t kNoThread = (pthread_t) 0;
 
 
+static void* GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+    uint64_t rnd1 = V8::RandomPrivate(isolate);
+    uint64_t rnd2 = V8::RandomPrivate(isolate);
+    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    // Currently available CPUs have 48 bits of virtual addressing. Truncate
+    // the hint address to 46 bits to give the kernel a fighting chance of
+    // fulfilling our placement request.
+    raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+    uint32_t raw_addr = V8::RandomPrivate(isolate);
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+    raw_addr &= 0x3ffff000;
+    raw_addr += 0x20000000;
+#endif
+    return reinterpret_cast<void*>(raw_addr);
+  }
+  return NULL;
+}
+
+
 double ceiling(double x) {
   return ceil(x);
 }
@@ -322,43 +350,126 @@ static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
     return false;
   }
 
-  UpdateAllocatedSpaceLimits(address, size);
+  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
 
 
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
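For context, the reserve/commit protocol that the new ReserveRegion/CommitRegion/UncommitRegion/ReleaseRegion methods implement can be exercised outside of V8 with plain POSIX mmap. The sketch below is illustrative only and is not part of the commit; it assumes a POSIX system where MAP_ANONYMOUS and MAP_NORESERVE are available, as the patched code does.

// Illustrative sketch (not part of the commit): the reserve/commit life cycle
// mirrored by ReserveRegion/CommitRegion/UncommitRegion/ReleaseRegion.
#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

int main() {
  const size_t kSize = 1 << 20;  // one 1 MB region

  // Reserve: claim address space only, with no access rights and no swap charge.
  void* base = mmap(NULL, kSize, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) return 1;

  // Commit: remap the same range in place (MAP_FIXED) with real permissions,
  // the equivalent of CommitRegion() without the executable bit.
  if (mmap(base, kSize, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
    return 1;
  }
  memset(base, 0xAB, kSize);  // the memory is now usable

  // Uncommit: drop the pages again but keep the address range reserved.
  if (mmap(base, kSize, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
           -1, 0) == MAP_FAILED) {
    return 1;
  }

  // Release: return the whole reservation to the kernel.
  if (munmap(base, kSize) != 0) return 1;

  printf("reserve/commit/uncommit/release round trip OK\n");
  return 0;
}

Committing by remapping with MAP_FIXED, as the patched CommitRegion does, replaces the range with fresh zero-filled pages rather than preserving existing contents the way mprotect would.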

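The two-argument VirtualMemory(size, alignment) constructor added above uses an over-reserve-and-trim scheme: reserve size + alignment bytes at a (possibly randomized) hint address, round the base up to the requested alignment, then unmap the unused prefix and suffix. The standalone sketch below illustrates that arithmetic; it is not part of the commit, uses a power-of-two rounding helper in place of the real RoundUp, and hard-codes a 4 KB granularity standing in for OS::AllocateAlignment().

// Illustrative sketch (not part of the commit): over-reserve and trim to get
// an aligned reservation, mirroring VirtualMemory(size_t size, size_t alignment).
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

// Round value up to a power-of-two multiple (the real RoundUp is more general).
static uintptr_t RoundUpTo(uintptr_t value, uintptr_t multiple) {
  return (value + multiple - 1) & ~(multiple - 1);
}

int main() {
  const size_t kPageSize = 4096;      // assumed allocation granularity
  const size_t kSize = 256 * 1024;    // block we actually want
  const size_t kAlignment = 1 << 20;  // required base alignment (1 MB)

  size_t request_size = RoundUpTo(kSize + kAlignment, kPageSize);
  void* reservation = mmap(NULL, request_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (reservation == MAP_FAILED) return 1;

  char* base = static_cast<char*>(reservation);
  char* aligned_base = reinterpret_cast<char*>(
      RoundUpTo(reinterpret_cast<uintptr_t>(base), kAlignment));

  // Trim the misaligned prefix, if any.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    munmap(base, prefix_size);
    request_size -= prefix_size;
  }

  // Trim whatever is left over past the aligned block.
  size_t aligned_size = RoundUpTo(kSize, kPageSize);
  if (aligned_size != request_size) {
    munmap(aligned_base + aligned_size, request_size - aligned_size);
  }

  printf("aligned reservation at %p, %zu bytes\n",
         static_cast<void*>(aligned_base), aligned_size);
  return munmap(aligned_base, aligned_size) == 0 ? 0 : 1;
}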