Adapt and heavily simplify ClingJITLinkMemoryManager
Now we can just inherit from InProcessMemoryManager and override the deallocate() method.
parent 799baa2781
commit 95a4fc8875
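For orientation, the change in the first hunk below boils down to a small, self-contained sketch. This assumes the LLVM 14-era ORC/JITLink API (InProcessMemoryManager, JITLinkMemoryManager::FinalizedAlloc, OnDeallocatedFunction); the class name NeverFreeMemoryManager is illustrative only and does not appear in the commit.

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include <vector>

using namespace llvm;
using namespace llvm::jitlink;

// Maps, protects and finalizes memory exactly like InProcessMemoryManager,
// but never unmaps it when the JIT asks for deallocation.
class NeverFreeMemoryManager : public InProcessMemoryManager {
public:
  // Reuse the base-class constructor, which takes the host page size.
  using InProcessMemoryManager::InProcessMemoryManager;

  void deallocate(std::vector<FinalizedAlloc> Allocs,
                  OnDeallocatedFunction OnDeallocated) override {
    // Drop the handles so that ~FinalizedAlloc does not assert on a
    // still-valid address; the underlying memory stays mapped.
    for (auto &Alloc : Allocs)
      Alloc.release();
    // Report success even though nothing was actually unmapped.
    OnDeallocated(Error::success());
  }
};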
@@ -194,114 +194,24 @@ namespace {
 };
 
 /// A JITLinkMemoryManager for Cling that never frees its allocations.
-class ClingJITLinkMemoryManager : public JITLinkMemoryManager {
+class ClingJITLinkMemoryManager : public InProcessMemoryManager {
 public:
-  Expected<std::unique_ptr<Allocation>>
-  allocate(const JITLinkDylib* JD,
-           const SegmentsRequestMap& Request) override {
-    // A copy of InProcessMemoryManager::allocate with an empty implementation
-    // of IPMMAlloc::deallocate.
+  using InProcessMemoryManager::InProcessMemoryManager;
 
-    using AllocationMap = DenseMap<unsigned, sys::MemoryBlock>;
+  void deallocate(std::vector<FinalizedAlloc> Allocs,
+                  OnDeallocatedFunction OnDeallocated) override {
+    // Disabled until CallFunc is informed about unloading, and can
+    // re-generate the wrapper (if the decl is still available). See
+    // https://github.com/root-project/root/issues/10898
 
-    // Local class for allocation.
-    class IPMMAlloc : public Allocation {
-    public:
-      IPMMAlloc(AllocationMap SegBlocks) : SegBlocks(std::move(SegBlocks)) {}
-      MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
-        assert(SegBlocks.count(Seg) && "No allocation for segment");
-        return {static_cast<char*>(SegBlocks[Seg].base()),
-                SegBlocks[Seg].allocatedSize()};
-      }
-      JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
-        assert(SegBlocks.count(Seg) && "No allocation for segment");
-        return pointerToJITTargetAddress(SegBlocks[Seg].base());
-      }
-      void finalizeAsync(FinalizeContinuation OnFinalize) override {
-        OnFinalize(applyProtections());
-      }
-      Error deallocate() override {
-        // Disabled until CallFunc is informed about unloading, and can
-        // re-generate the wrapper (if the decl is still available). See
-        // https://github.com/root-project/root/issues/10898
-        return Error::success();
-      }
-
-    private:
-      Error applyProtections() {
-        for (auto& KV : SegBlocks) {
-          auto& Prot = KV.first;
-          auto& Block = KV.second;
-          if (auto EC = sys::Memory::protectMappedMemory(Block, Prot))
-            return errorCodeToError(EC);
-          if (Prot & sys::Memory::MF_EXEC)
-            sys::Memory::InvalidateInstructionCache(Block.base(),
-                                                    Block.allocatedSize());
-        }
-        return Error::success();
-      }
-
-      AllocationMap SegBlocks;
-    };
-
-    if (!isPowerOf2_64((uint64_t)sys::Process::getPageSizeEstimate()))
-      return make_error<StringError>("Page size is not a power of 2",
-                                     inconvertibleErrorCode());
-
-    AllocationMap Blocks;
-    const sys::Memory::ProtectionFlags ReadWrite =
-        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
-                                                  sys::Memory::MF_WRITE);
-
-    // Compute the total number of pages to allocate.
-    size_t TotalSize = 0;
-    for (auto& KV : Request) {
-      const auto& Seg = KV.second;
-
-      if (Seg.getAlignment() > sys::Process::getPageSizeEstimate())
-        return make_error<StringError>("Cannot request higher than page "
-                                       "alignment",
-                                       inconvertibleErrorCode());
-
-      TotalSize = alignTo(TotalSize, sys::Process::getPageSizeEstimate());
-      TotalSize += Seg.getContentSize();
-      TotalSize += Seg.getZeroFillSize();
+    // We still have to release the allocations which resets their addresses
+    // to FinalizedAlloc::InvalidAddr, or the assertion in ~FinalizedAlloc
+    // will be unhappy...
+    for (auto &Alloc : Allocs) {
+      Alloc.release();
     }
 
-    // Allocate one slab to cover all the segments.
-    std::error_code EC;
-    auto SlabRemaining =
-        sys::Memory::allocateMappedMemory(TotalSize, nullptr, ReadWrite, EC);
-
-    if (EC)
-      return errorCodeToError(EC);
-
-    // Allocate segment memory from the slab.
-    for (auto& KV : Request) {
-
-      const auto& Seg = KV.second;
-
-      uint64_t SegmentSize =
-          alignTo(Seg.getContentSize() + Seg.getZeroFillSize(),
-                  sys::Process::getPageSizeEstimate());
-      assert(SlabRemaining.allocatedSize() >= SegmentSize &&
-             "Mapping exceeds allocation");
-
-      sys::MemoryBlock SegMem(SlabRemaining.base(), SegmentSize);
-      SlabRemaining =
-          sys::MemoryBlock((char*)SlabRemaining.base() + SegmentSize,
-                           SlabRemaining.allocatedSize() - SegmentSize);
-
-      // Zero out the zero-fill memory.
-      memset(static_cast<char*>(SegMem.base()) + Seg.getContentSize(), 0,
-             Seg.getZeroFillSize());
-
-      // Record the block for this segment.
-      Blocks[KV.first] = std::move(SegMem);
-    }
-
-    return std::unique_ptr<InProcessMemoryManager::Allocation>(
-        new IPMMAlloc(std::move(Blocks)));
+    // Pretend we successfully deallocated everything...
+    OnDeallocated(Error::success());
   }
 };
@@ -553,8 +463,9 @@ IncrementalJIT::IncrementalJIT(
   // memory segments; the default InProcessMemoryManager (which is mostly
   // copied above) already does slab allocation to keep all segments
   // together which is needed for exception handling support.
+  unsigned PageSize = *sys::Process::getPageSize();
   auto ObjLinkingLayer = std::make_unique<ObjectLinkingLayer>(
-      ES, std::make_unique<ClingJITLinkMemoryManager>());
+      ES, std::make_unique<ClingJITLinkMemoryManager>(PageSize));
   ObjLinkingLayer->addPlugin(std::make_unique<EHFrameRegistrationPlugin>(
       ES, std::make_unique<InProcessEHFrameRegistrar>()));
   return ObjLinkingLayer;
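The hunk above passes the host page size to the inherited InProcessMemoryManager constructor and keeps the eh-frame plugin so C++ exceptions keep working through JITed frames. A sketch of that wiring, building on the NeverFreeMemoryManager sketch at the top of this page; the helper name makeNeverFreeLinkingLayer is assumed for illustration and is not part of the commit.

#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/Support/Process.h"

using namespace llvm;
using namespace llvm::orc;

// Create an ObjectLinkingLayer backed by the never-freeing memory manager.
// The EHFrameRegistrationPlugin registers unwind info for JITed code so
// that exceptions can propagate through it.
Expected<std::unique_ptr<ObjectLinkingLayer>>
makeNeverFreeLinkingLayer(ExecutionSession &ES) {
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();

  auto Layer = std::make_unique<ObjectLinkingLayer>(
      ES, std::make_unique<NeverFreeMemoryManager>(*PageSize));
  Layer->addPlugin(std::make_unique<EHFrameRegistrationPlugin>(
      ES, std::make_unique<jitlink::InProcessEHFrameRegistrar>()));
  return std::move(Layer);
}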