Mirror of https://github.com/FairRootGroup/FairMQ.git (synced 2025-10-13 16:46:47 +00:00)
Shm: initial multiple segments support
commit 266843cda5
parent b126ede45a
@@ -75,7 +75,7 @@ class FairMQUnmanagedRegion
     virtual void* GetData() const = 0;
     virtual size_t GetSize() const = 0;
-    virtual uint64_t GetId() const = 0;
+    virtual uint16_t GetId() const = 0;
     virtual void SetLinger(uint32_t linger) = 0;
     virtual uint32_t GetLinger() const = 0;
@@ -70,6 +70,7 @@ Plugin::ProgOptions ConfigPluginProgramOptions()
         ("print-channels", po::value<bool>()->implicit_value(true), "Print registered channel endpoints in a machine-readable format (<channel name>:<min num subchannels>:<max num subchannels>)")
         ("shm-segment-size", po::value<size_t>()->default_value(2ULL << 30), "Shared memory: size of the shared memory segment (in bytes).")
         ("shm-allocation", po::value<string>()->default_value("rbtree_best_fit"), "Shared memory allocation algorithm: rbtree_best_fit/simple_seq_fit.")
+        ("shm-segment-id", po::value<uint16_t>()->default_value(0), "EXPERIMENTAL: Shared memory segment id for message creation.")
         ("shm-mlock-segment", po::value<bool>()->default_value(false), "Shared memory: mlock the shared memory segment after initialization.")
         ("shm-zero-segment", po::value<bool>()->default_value(false), "Shared memory: zero the shared memory segment memory after initialization.")
         ("shm-throw-bad-alloc", po::value<bool>()->default_value(true), "Throw a fair::mq::MessageBadAlloc if cannot allocate a message (retry if false).")
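
Note (not part of the patch): the new `shm-segment-id` switch is declared through boost::program_options like the other `shm-*` options above. A minimal standalone sketch of declaring and reading such an option follows; the `main()`, option set and example values are illustrative only, not FairMQ code.

    // Sketch: declare and read a uint16_t option the way the plugin declares
    // "shm-segment-id" above (boost::program_options).
    #include <boost/program_options.hpp>
    #include <cstdint>
    #include <iostream>

    namespace po = boost::program_options;

    int main(int argc, char** argv)
    {
        po::options_description desc("shmem options");
        desc.add_options()
            ("shm-segment-id", po::value<uint16_t>()->default_value(0), "Shared memory segment id for message creation.");

        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);

        std::cout << "segment id: " << vm["shm-segment-id"].as<uint16_t>() << "\n";
        return 0;
    }

A device would then presumably be started with something like `--shm-segment-id 2` to create its messages in segment 2.
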
@@ -81,9 +81,9 @@ struct RegionInfo
     bool fDestroyed;
 };

-using Uint64RegionInfoPairAlloc = boost::interprocess::allocator<std::pair<const uint64_t, RegionInfo>, SegmentManager>;
-using Uint64RegionInfoMap = boost::interprocess::map<uint64_t, RegionInfo, std::less<uint64_t>, Uint64RegionInfoPairAlloc>;
-using Uint64RegionInfoHashMap = boost::unordered_map<uint64_t, RegionInfo, boost::hash<uint64_t>, std::equal_to<uint64_t>, Uint64RegionInfoPairAlloc>;
+using Uint16RegionInfoPairAlloc = boost::interprocess::allocator<std::pair<const uint16_t, RegionInfo>, SegmentManager>;
+using Uint16RegionInfoMap = boost::interprocess::map<uint16_t, RegionInfo, std::less<uint16_t>, Uint16RegionInfoPairAlloc>;
+using Uint16RegionInfoHashMap = boost::unordered_map<uint16_t, RegionInfo, boost::hash<uint16_t>, std::equal_to<uint16_t>, Uint16RegionInfoPairAlloc>;

 struct SegmentInfo
 {
@@ -94,9 +94,9 @@ struct SegmentInfo
     AllocationAlgorithm fAllocationAlgorithm;
 };

-using Uint64SegmentInfoPairAlloc = boost::interprocess::allocator<std::pair<const uint64_t, SegmentInfo>, SegmentManager>;
-using Uint64SegmentInfoMap = boost::interprocess::map<uint64_t, SegmentInfo, std::less<uint64_t>, Uint64SegmentInfoPairAlloc>;
-using Uint64SegmentInfoHashMap = boost::unordered_map<uint64_t, SegmentInfo, boost::hash<uint64_t>, std::equal_to<uint64_t>, Uint64SegmentInfoPairAlloc>;
+using Uint16SegmentInfoPairAlloc = boost::interprocess::allocator<std::pair<const uint16_t, SegmentInfo>, SegmentManager>;
+using Uint16SegmentInfoMap = boost::interprocess::map<uint16_t, SegmentInfo, std::less<uint16_t>, Uint16SegmentInfoPairAlloc>;
+using Uint16SegmentInfoHashMap = boost::unordered_map<uint16_t, SegmentInfo, boost::hash<uint16_t>, std::equal_to<uint16_t>, Uint16SegmentInfoPairAlloc>;

 struct DeviceCounter
 {
@@ -120,18 +120,19 @@ struct MsgCounter

 struct RegionCounter
 {
-    RegionCounter(uint64_t c)
+    RegionCounter(uint16_t c)
         : fCount(c)
     {}

-    std::atomic<uint64_t> fCount;
+    std::atomic<uint16_t> fCount;
 };

 struct MetaHeader
 {
     size_t fSize;
-    size_t fRegionId;
     size_t fHint;
+    uint16_t fRegionId;
+    uint16_t fSegmentId;
     boost::interprocess::managed_shared_memory::handle_t fHandle;
 };
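
For readability, here is the MetaHeader as it reads after this hunk, assembled from the `+` lines above; the field order is what the brace-initializers in Message rely on. The comments are my reading of the surrounding diff, not author commentary.

    #include <cstddef>
    #include <cstdint>
    #include <boost/interprocess/managed_shared_memory.hpp>

    struct MetaHeader
    {
        size_t fSize;        // payload size
        size_t fHint;        // user-supplied hint (unmanaged-region messages)
        uint16_t fRegionId;  // 0 means the payload lives in a managed segment
        uint16_t fSegmentId; // which managed segment fHandle refers to
        boost::interprocess::managed_shared_memory::handle_t fHandle;
    };
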
@@ -60,6 +60,7 @@ class Manager
   public:
     Manager(std::string shmId, std::string deviceId, size_t size, const ProgOptions* config)
         : fShmId(std::move(shmId))
+        , fSegmentId(0)
         , fDeviceId(std::move(deviceId))
         , fSegments()
         , fManagementSegment(boost::interprocess::open_or_create, std::string("fmq_" + fShmId + "_mng").c_str(), 6553600)
@@ -88,6 +89,7 @@ class Manager
         std::string allocationAlgorithm("rbtree_best_fit");
         if (config) {
             mlockSegment = config->GetProperty<bool>("shm-mlock-segment", mlockSegment);
+            fSegmentId = config->GetProperty<uint16_t>("shm-segment-id", fSegmentId);
             zeroSegment = config->GetProperty<bool>("shm-zero-segment", zeroSegment);
             autolaunchMonitor = config->GetProperty<bool>("shm-monitor", autolaunchMonitor);
             fThrowOnBadAlloc = config->GetProperty<bool>("shm-throw-bad-alloc", fThrowOnBadAlloc);
@@ -109,32 +111,30 @@ class Manager
         std::stringstream ss;
         boost::interprocess::scoped_lock<boost::interprocess::named_mutex> lock(fShmMtx);

-        fShmSegments = fManagementSegment.find_or_construct<Uint64SegmentInfoHashMap>(unique_instance)(fShmVoidAlloc);
-
-        const uint64_t id = 0;
+        fShmSegments = fManagementSegment.find_or_construct<Uint16SegmentInfoHashMap>(unique_instance)(fShmVoidAlloc);

         try {
-            auto it = fShmSegments->find(id);
+            auto it = fShmSegments->find(fSegmentId);
             if (it == fShmSegments->end()) {
                 // no segment with given id exists, creating
                 if (allocationAlgorithm == "rbtree_best_fit") {
-                    fSegments.emplace(id, RBTreeBestFitSegment(create_only, std::string("fmq_" + fShmId + "_main").c_str(), size));
-                    fShmSegments->emplace(id, AllocationAlgorithm::rbtree_best_fit);
+                    fSegments.emplace(fSegmentId, RBTreeBestFitSegment(create_only, std::string("fmq_" + fShmId + "_m_" + std::to_string(fSegmentId)).c_str(), size));
+                    fShmSegments->emplace(fSegmentId, AllocationAlgorithm::rbtree_best_fit);
                 } else if (allocationAlgorithm == "simple_seq_fit") {
-                    fSegments.emplace(id, SimpleSeqFitSegment(create_only, std::string("fmq_" + fShmId + "_main").c_str(), size));
-                    fShmSegments->emplace(id, AllocationAlgorithm::simple_seq_fit);
+                    fSegments.emplace(fSegmentId, SimpleSeqFitSegment(create_only, std::string("fmq_" + fShmId + "_m_" + std::to_string(fSegmentId)).c_str(), size));
+                    fShmSegments->emplace(fSegmentId, AllocationAlgorithm::simple_seq_fit);
                 }
                 ss << "Created ";
             } else {
                 // found segment with the given id, opening
                 if (it->second.fAllocationAlgorithm == AllocationAlgorithm::rbtree_best_fit) {
-                    fSegments.emplace(id, RBTreeBestFitSegment(open_only, std::string("fmq_" + fShmId + "_main").c_str()));
+                    fSegments.emplace(fSegmentId, RBTreeBestFitSegment(open_only, std::string("fmq_" + fShmId + "_m_" + std::to_string(fSegmentId)).c_str()));
                     if (allocationAlgorithm != "rbtree_best_fit") {
                         LOG(warn) << "Allocation algorithm of the opened segment is rbtree_best_fit, but requested is " << allocationAlgorithm << ". Ignoring requested setting.";
                         allocationAlgorithm = "rbtree_best_fit";
                     }
                 } else {
-                    fSegments.emplace(id, SimpleSeqFitSegment(open_only, std::string("fmq_" + fShmId + "_main").c_str()));
+                    fSegments.emplace(fSegmentId, SimpleSeqFitSegment(open_only, std::string("fmq_" + fShmId + "_m_" + std::to_string(fSegmentId)).c_str()));
                     if (allocationAlgorithm != "simple_seq_fit") {
                         LOG(warn) << "Allocation algorithm of the opened segment is simple_seq_fit, but requested is " << allocationAlgorithm << ". Ignoring requested setting.";
                         allocationAlgorithm = "simple_seq_fit";
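
A minimal, self-contained sketch (values invented) of the create-or-open step above, using the new per-segment naming scheme `fmq_<shmId>_m_<segmentId>`; the real code additionally dispatches on the allocation algorithm and records it in the management segment.

    // Sketch: create the named managed segment if it does not exist,
    // otherwise attach to it. shmId/segmentId are example values.
    #include <boost/interprocess/managed_shared_memory.hpp>
    #include <boost/interprocess/shared_memory_object.hpp>
    #include <cstdint>
    #include <iostream>
    #include <string>

    int main()
    {
        using namespace boost::interprocess;

        const std::string shmId = "4d2";  // example; normally derived from session + user id
        const uint16_t segmentId = 0;     // example; normally from --shm-segment-id
        const std::string name = "fmq_" + shmId + "_m_" + std::to_string(segmentId);

        try {
            managed_shared_memory segment(open_or_create, name.c_str(), 1 << 20);
            std::cout << name << ": size " << segment.get_size()
                      << ", free " << segment.get_free_memory() << " bytes\n";
        } catch (interprocess_exception& e) {
            std::cerr << "could not create/open " << name << ": " << e.what() << "\n";
        }

        shared_memory_object::remove(name.c_str()); // cleanup, for the sketch only
        return 0;
    }
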
@@ -142,33 +142,33 @@ class Manager
                 }
                 ss << "Opened ";
             }
-            ss << "shared memory segment '" << "fmq_" << fShmId << "_main_" << id << "'."
-               << " Size: " << boost::apply_visitor(SegmentSize{}, fSegments.at(id)) << " bytes."
-               << " Available: " << boost::apply_visitor(SegmentFreeMemory{}, fSegments.at(id)) << " bytes."
+            ss << "shared memory segment '" << "fmq_" << fShmId << "_m_" << fSegmentId << "'."
+               << " Size: " << boost::apply_visitor(SegmentSize{}, fSegments.at(fSegmentId)) << " bytes."
+               << " Available: " << boost::apply_visitor(SegmentFreeMemory{}, fSegments.at(fSegmentId)) << " bytes."
                << " Allocation algorithm: " << allocationAlgorithm;
             LOG(debug) << ss.str();
-        } catch(interprocess_exception&) {
-            LOG(error) << "something went wrong";
+        } catch(interprocess_exception& bie) {
+            LOG(error) << "something went wrong: " << bie.what();
         }
     }

     if (mlockSegment) {
         LOG(debug) << "Locking the managed segment memory pages...";
-        if (mlock(boost::apply_visitor(SegmentAddress{}, fSegments.at(0)), boost::apply_visitor(SegmentSize{}, fSegments.at(0))) == -1) {
+        if (mlock(boost::apply_visitor(SegmentAddress{}, fSegments.at(fSegmentId)), boost::apply_visitor(SegmentSize{}, fSegments.at(fSegmentId))) == -1) {
             LOG(error) << "Could not lock the managed segment memory. Code: " << errno << ", reason: " << strerror(errno);
         }
         LOG(debug) << "Successfully locked the managed segment memory pages.";
     }
     if (zeroSegment) {
         LOG(debug) << "Zeroing the managed segment free memory...";
-        boost::apply_visitor(SegmentMemoryZeroer{}, fSegments.at(0));
+        boost::apply_visitor(SegmentMemoryZeroer{}, fSegments.at(fSegmentId));
         LOG(debug) << "Successfully zeroed the managed segment free memory.";
     }

     {
         boost::interprocess::scoped_lock<boost::interprocess::named_mutex> lock(fShmMtx);

-        fShmRegions = fManagementSegment.find_or_construct<Uint64RegionInfoHashMap>(unique_instance)(fShmVoidAlloc);
+        fShmRegions = fManagementSegment.find_or_construct<Uint16RegionInfoHashMap>(unique_instance)(fShmVoidAlloc);

 #ifdef FAIRMQ_DEBUG_MODE
         fMsgDebug = fManagementSegment.find_or_construct<SizetMsgDebugMap>(unique_instance)(fShmVoidAlloc);
@@ -258,7 +258,7 @@ class Manager
     }
     bool Interrupted() { return fInterrupted.load(); }

-    std::pair<boost::interprocess::mapped_region*, uint64_t> CreateRegion(const size_t size,
+    std::pair<boost::interprocess::mapped_region*, uint16_t> CreateRegion(const size_t size,
                                                                           const int64_t userFlags,
                                                                           RegionCallback callback,
                                                                           RegionBulkCallback bulkCallback,
@@ -267,10 +267,10 @@ class Manager
     {
         using namespace boost::interprocess;
         try {
-            std::pair<mapped_region*, uint64_t> result;
+            std::pair<mapped_region*, uint16_t> result;

             {
-                uint64_t id = 0;
+                uint16_t id = 0;
                 boost::interprocess::scoped_lock<boost::interprocess::named_mutex> lock(fShmMtx);

                 RegionCounter* rc = fManagementSegment.find<RegionCounter>(unique_instance).first;
@@ -314,13 +314,13 @@ class Manager
         }
     }

-    Region* GetRegion(const uint64_t id)
+    Region* GetRegion(const uint16_t id)
     {
         boost::interprocess::scoped_lock<boost::interprocess::named_mutex> lock(fShmMtx);
         return GetRegionUnsafe(id);
     }

-    Region* GetRegionUnsafe(const uint64_t id)
+    Region* GetRegionUnsafe(const uint16_t id)
     {
         // remote region could actually be a local one if a message originates from this device (has been sent out and returned)
         auto it = fRegions.find(id);
@@ -347,7 +347,7 @@ class Manager
         }
     }

-    void RemoveRegion(const uint64_t id)
+    void RemoveRegion(const uint16_t id)
     {
         fRegions.erase(id);
         {
@@ -495,13 +495,37 @@ class Manager

     bool ThrowingOnBadAlloc() const { return fThrowOnBadAlloc; }

-    boost::interprocess::managed_shared_memory::handle_t GetHandleFromAddress(const void* ptr) const
+    void GetSegment(uint16_t id)
     {
-        return boost::apply_visitor(SegmentHandleFromAddress{ptr}, fSegments.at(0));
+        auto it = fSegments.find(id);
+        if (it == fSegments.end()) {
+            try {
+                // get region info
+                SegmentInfo segmentInfo = fShmSegments->at(id);
+                LOG(info) << "LOCATED SEGMENT WITH ID '" << id << "'";
+
+                using namespace boost::interprocess;
+
+                if (segmentInfo.fAllocationAlgorithm == AllocationAlgorithm::rbtree_best_fit) {
+                    fSegments.emplace(id, RBTreeBestFitSegment(open_only, std::string("fmq_" + fShmId + "_m_" + std::to_string(id)).c_str()));
+                } else {
+                    fSegments.emplace(id, SimpleSeqFitSegment(open_only, std::string("fmq_" + fShmId + "_m_" + std::to_string(id)).c_str()));
+                }
+            } catch (std::out_of_range& oor) {
+                LOG(error) << "Could not get segment with id '" << id << "': " << oor.what();
+            } catch (boost::interprocess::interprocess_exception& bie) {
+                LOG(error) << "Could not get segment with id '" << id << "': " << bie.what();
+            }
+        }
     }
-    void* GetAddressFromHandle(const boost::interprocess::managed_shared_memory::handle_t handle) const
+
+    boost::interprocess::managed_shared_memory::handle_t GetHandleFromAddress(const void* ptr, uint16_t segmentId) const
     {
-        return boost::apply_visitor(SegmentAddressFromHandle{handle}, fSegments.at(0));
+        return boost::apply_visitor(SegmentHandleFromAddress{ptr}, fSegments.at(segmentId));
+    }
+    void* GetAddressFromHandle(const boost::interprocess::managed_shared_memory::handle_t handle, uint16_t segmentId) const
+    {
+        return boost::apply_visitor(SegmentAddressFromHandle{handle}, fSegments.at(segmentId));
     }

     char* Allocate(const size_t size, size_t alignment = 0)
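
GetHandleFromAddress/GetAddressFromHandle now take the segment id explicitly; underneath, a Boost.Interprocess handle is a segment-relative offset, so the pair (segment id, handle) identifies a buffer across processes. A standalone sketch of that round trip, with segment name and size invented for illustration:

    // Sketch: pointer -> handle -> pointer within one managed segment.
    #include <boost/interprocess/managed_shared_memory.hpp>
    #include <boost/interprocess/shared_memory_object.hpp>
    #include <cassert>
    #include <iostream>

    int main()
    {
        using namespace boost::interprocess;

        const char* name = "fmq_demo_m_0"; // illustrative name
        shared_memory_object::remove(name);
        managed_shared_memory segment(create_only, name, 1 << 20);

        void* ptr = segment.allocate(100);                    // cf. Manager::Allocate
        auto handle = segment.get_handle_from_address(ptr);   // cf. MetaHeader::fHandle
        void* back = segment.get_address_from_handle(handle); // what the receiving side does
        assert(back == ptr);
        std::cout << "handle " << handle << " maps back to the same address\n";

        segment.deallocate(ptr);
        shared_memory_object::remove(name);
        return 0;
    }
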
@@ -513,20 +537,20 @@ class Manager
         try {
             // boost::interprocess::managed_shared_memory::size_type actualSize = size;
             // char* hint = 0; // unused for boost::interprocess::allocate_new
-            // ptr = fSegments.at(0).allocation_command<char>(boost::interprocess::allocate_new, size, actualSize, hint);
-            size_t segmentSize = boost::apply_visitor(SegmentSize{}, fSegments.at(0));
+            // ptr = fSegments.at(fSegmentId).allocation_command<char>(boost::interprocess::allocate_new, size, actualSize, hint);
+            size_t segmentSize = boost::apply_visitor(SegmentSize{}, fSegments.at(fSegmentId));
             if (size > segmentSize) {
                 throw MessageBadAlloc(tools::ToString("Requested message size (", size, ") exceeds segment size (", segmentSize, ")"));
             }
             if (alignment == 0) {
-                ptr = reinterpret_cast<char*>(boost::apply_visitor(SegmentAllocate{size}, fSegments.at(0)));
+                ptr = reinterpret_cast<char*>(boost::apply_visitor(SegmentAllocate{size}, fSegments.at(fSegmentId)));
             } else {
-                ptr = reinterpret_cast<char*>(boost::apply_visitor(SegmentAllocateAligned{size, alignment}, fSegments.at(0)));
+                ptr = reinterpret_cast<char*>(boost::apply_visitor(SegmentAllocateAligned{size, alignment}, fSegments.at(fSegmentId)));
             }
         } catch (boost::interprocess::bad_alloc& ba) {
             // LOG(warn) << "Shared memory full...";
             if (ThrowingOnBadAlloc()) {
-                throw MessageBadAlloc(tools::ToString("shmem: could not create a message of size ", size, ", alignment: ", (alignment != 0) ? std::to_string(alignment) : "default", ", free memory: ", boost::apply_visitor(SegmentFreeMemory{}, fSegments.at(0))));
+                throw MessageBadAlloc(tools::ToString("shmem: could not create a message of size ", size, ", alignment: ", (alignment != 0) ? std::to_string(alignment) : "default", ", free memory: ", boost::apply_visitor(SegmentFreeMemory{}, fSegments.at(fSegmentId))));
             }
             // rateLimiter.maybe_sleep();
             std::this_thread::sleep_for(std::chrono::milliseconds(50));
@@ -546,9 +570,9 @@ class Manager
         return ptr;
     }

-    void Deallocate(boost::interprocess::managed_shared_memory::handle_t handle)
+    void Deallocate(boost::interprocess::managed_shared_memory::handle_t handle, uint16_t segmentId)
     {
-        boost::apply_visitor(SegmentDeallocate{GetAddressFromHandle(handle)}, fSegments.at(0));
+        boost::apply_visitor(SegmentDeallocate{GetAddressFromHandle(handle, segmentId)}, fSegments.at(segmentId));
 #ifdef FAIRMQ_DEBUG_MODE
         boost::interprocess::scoped_lock<boost::interprocess::named_mutex> lock(fShmMtx);
         DecrementShmMsgCounter();
@@ -556,11 +580,13 @@ class Manager
 #endif
     }

-    char* ShrinkInPlace(size_t oldSize, size_t newSize, char* localPtr)
+    char* ShrinkInPlace(size_t oldSize, size_t newSize, char* localPtr, uint16_t segmentId)
     {
-        return boost::apply_visitor(SegmentBufferShrink{oldSize, newSize, localPtr}, fSegments.at(0));
+        return boost::apply_visitor(SegmentBufferShrink{oldSize, newSize, localPtr}, fSegments.at(segmentId));
     }

+    uint16_t GetSegmentId() const { return fSegmentId; }
+
     ~Manager()
     {
         using namespace boost::interprocess;
@@ -599,8 +625,9 @@ class Manager

   private:
     std::string fShmId;
+    uint16_t fSegmentId;
     std::string fDeviceId;
-    std::unordered_map<uint64_t, boost::variant<RBTreeBestFitSegment, SimpleSeqFitSegment>> fSegments;
+    std::unordered_map<uint16_t, boost::variant<RBTreeBestFitSegment, SimpleSeqFitSegment>> fSegments;
     boost::interprocess::managed_shared_memory fManagementSegment;
     VoidAlloc fShmVoidAlloc;
     boost::interprocess::named_mutex fShmMtx;
@@ -609,12 +636,12 @@ class Manager
     std::thread fRegionEventThread;
     bool fRegionEventsSubscriptionActive;
     std::function<void(fair::mq::RegionInfo)> fRegionEventCallback;
-    std::unordered_map<uint64_t, RegionEvent> fObservedRegionEvents;
+    std::unordered_map<uint16_t, RegionEvent> fObservedRegionEvents;

     DeviceCounter* fDeviceCounter;
-    Uint64SegmentInfoHashMap* fShmSegments;
-    Uint64RegionInfoHashMap* fShmRegions;
-    std::unordered_map<uint64_t, std::unique_ptr<Region>> fRegions;
+    Uint16SegmentInfoHashMap* fShmSegments;
+    Uint16RegionInfoHashMap* fShmRegions;
+    std::unordered_map<uint16_t, std::unique_ptr<Region>> fRegions;

     std::atomic<bool> fInterrupted;
     std::atomic<int32_t> fMsgCounter; // TODO: find a better lifetime solution instead of the counter
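
fSegments now maps a 16-bit segment id to a boost::variant over the two segment flavours, and all per-segment operations (SegmentSize, SegmentAllocate, ...) are dispatched through boost::apply_visitor. A small sketch of that dispatch pattern with stand-in types; the segment classes and visitor here are invented, not the ones from the shmem headers.

    // Sketch: id -> variant of segment types, operations dispatched via visitors.
    #include <boost/variant.hpp>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    struct SegmentA { std::size_t Size() const { return 1024; } };
    struct SegmentB { std::size_t Size() const { return 2048; } };

    struct SegmentSize : boost::static_visitor<std::size_t>
    {
        template<typename S>
        std::size_t operator()(const S& s) const { return s.Size(); }
    };

    int main()
    {
        std::unordered_map<uint16_t, boost::variant<SegmentA, SegmentB>> segments;
        segments.emplace(0, SegmentA{});
        segments.emplace(1, SegmentB{});

        for (const auto& kv : segments) {
            std::cout << "segment " << kv.first << ": "
                      << boost::apply_visitor(SegmentSize{}, kv.second) << " bytes\n";
        }
        return 0;
    }
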
@@ -43,7 +43,7 @@ class Message final : public fair::mq::Message
         : fair::mq::Message(factory)
         , fManager(manager)
         , fQueued(false)
-        , fMeta{0, 0, 0, -1}
+        , fMeta{0, 0, 0, fManager.GetSegmentId(), -1}
         , fRegionPtr(nullptr)
         , fLocalPtr(nullptr)
     {
@@ -54,7 +54,7 @@ class Message final : public fair::mq::Message
         : fair::mq::Message(factory)
         , fManager(manager)
         , fQueued(false)
-        , fMeta{0, 0, 0, -1}
+        , fMeta{0, 0, 0, fManager.GetSegmentId(), -1}
         , fRegionPtr(nullptr)
         , fLocalPtr(nullptr)
     {
@@ -65,7 +65,7 @@ class Message final : public fair::mq::Message
         : fair::mq::Message(factory)
         , fManager(manager)
         , fQueued(false)
-        , fMeta{0, 0, 0, -1}
+        , fMeta{0, 0, 0, fManager.GetSegmentId(), -1}
         , fRegionPtr(nullptr)
         , fLocalPtr(nullptr)
     {
@@ -77,7 +77,7 @@ class Message final : public fair::mq::Message
         : fair::mq::Message(factory)
         , fManager(manager)
         , fQueued(false)
-        , fMeta{0, 0, 0, -1}
+        , fMeta{0, 0, 0, fManager.GetSegmentId(), -1}
         , fRegionPtr(nullptr)
         , fLocalPtr(nullptr)
     {
@@ -89,7 +89,7 @@ class Message final : public fair::mq::Message
         : fair::mq::Message(factory)
         , fManager(manager)
         , fQueued(false)
-        , fMeta{0, 0, 0, -1}
+        , fMeta{0, 0, 0, fManager.GetSegmentId(), -1}
         , fRegionPtr(nullptr)
         , fLocalPtr(nullptr)
     {
@@ -108,7 +108,7 @@ class Message final : public fair::mq::Message
         : fair::mq::Message(factory)
         , fManager(manager)
         , fQueued(false)
-        , fMeta{size, static_cast<UnmanagedRegion*>(region.get())->fRegionId, reinterpret_cast<size_t>(hint), -1}
+        , fMeta{size, reinterpret_cast<size_t>(hint), static_cast<UnmanagedRegion*>(region.get())->fRegionId, fManager.GetSegmentId(), -1}
         , fRegionPtr(nullptr)
         , fLocalPtr(static_cast<char*>(data))
     {
@@ -169,7 +169,8 @@ class Message final : public fair::mq::Message
         if (!fLocalPtr) {
             if (fMeta.fRegionId == 0) {
                 if (fMeta.fSize > 0) {
-                    fLocalPtr = reinterpret_cast<char*>(fManager.GetAddressFromHandle(fMeta.fHandle));
+                    fManager.GetSegment(fMeta.fSegmentId);
+                    fLocalPtr = reinterpret_cast<char*>(fManager.GetAddressFromHandle(fMeta.fHandle, fMeta.fSegmentId));
                 } else {
                     fLocalPtr = nullptr;
                 }
@@ -195,7 +196,7 @@ class Message final : public fair::mq::Message
             return true;
         } else if (newSize <= fMeta.fSize) {
             try {
-                fLocalPtr = fManager.ShrinkInPlace(fMeta.fSize, newSize, fLocalPtr);
+                fLocalPtr = fManager.ShrinkInPlace(fMeta.fSize, newSize, fLocalPtr, fMeta.fSegmentId);
                 fMeta.fSize = newSize;
                 return true;
             } catch (boost::interprocess::interprocess_exception& e) {
@@ -248,7 +249,7 @@ class Message final : public fair::mq::Message
     {
         fLocalPtr = fManager.Allocate(size, alignment);
         if (fLocalPtr) {
-            fMeta.fHandle = fManager.GetHandleFromAddress(fLocalPtr);
+            fMeta.fHandle = fManager.GetHandleFromAddress(fLocalPtr, fMeta.fSegmentId);
             fMeta.fSize = size;
         }
         return fLocalPtr;
@@ -258,7 +259,8 @@ class Message final : public fair::mq::Message
     {
         if (fMeta.fHandle >= 0 && !fQueued) {
             if (fMeta.fRegionId == 0) {
-                fManager.Deallocate(fMeta.fHandle);
+                fManager.GetSegment(fMeta.fSegmentId);
+                fManager.Deallocate(fMeta.fHandle, fMeta.fSegmentId);
                 fMeta.fHandle = -1;
             } else {
                 if (!fRegionPtr) {
@@ -63,7 +63,7 @@ Monitor::Monitor(const string& shmId, bool selfDestruct, bool interactive, bool
     , fTimeoutInMS(timeoutInMS)
     , fIntervalInMS(intervalInMS)
     , fShmId(shmId)
-    , fSegmentName("fmq_" + fShmId + "_main")
+    , fSegmentName("fmq_" + fShmId + "_m_0")
     , fManagementSegmentName("fmq_" + fShmId + "_mng")
     , fControlQueueName("fmq_" + fShmId + "_cq")
     , fTerminating(false)
@@ -280,23 +280,19 @@ void Monitor::CheckSegment()
     try {
         managed_shared_memory managementSegment(open_only, fManagementSegmentName.c_str());

-        Uint64SegmentInfoHashMap* segmentInfos = managementSegment.find<Uint64SegmentInfoHashMap>(unique_instance).first;
-        std::unordered_map<uint64_t, boost::variant<RBTreeBestFitSegment, SimpleSeqFitSegment>> segments;
+        Uint16SegmentInfoHashMap* segmentInfos = managementSegment.find<Uint16SegmentInfoHashMap>(unique_instance).first;
+        std::unordered_map<uint16_t, boost::variant<RBTreeBestFitSegment, SimpleSeqFitSegment>> segments;

         if (!segmentInfos) {
             cout << "Found management segment, but cannot locate segment info, something went wrong..." << endl;
             return;
         }

-        const uint64_t id = 0;
-
-        auto it = segmentInfos->find(id);
-        if (it != segmentInfos->end()) {
-            // found segment with the given id, opening
-            if (it->second.fAllocationAlgorithm == AllocationAlgorithm::rbtree_best_fit) {
-                segments.emplace(id, RBTreeBestFitSegment(open_only, fSegmentName.c_str()));
+        for (const auto& s : *segmentInfos) {
+            if (s.second.fAllocationAlgorithm == AllocationAlgorithm::rbtree_best_fit) {
+                segments.emplace(s.first, RBTreeBestFitSegment(open_only, std::string("fmq_" + fShmId + "_m_" + to_string(s.first)).c_str()));
             } else {
-                segments.emplace(id, SimpleSeqFitSegment(open_only, fSegmentName.c_str()));
+                segments.emplace(s.first, SimpleSeqFitSegment(open_only, std::string("fmq_" + fShmId + "_m_" + to_string(s.first)).c_str()));
             }
         }

@@ -336,8 +332,8 @@ void Monitor::CheckSegment()
         if (fInteractive) {
             cout << "| "
                  << setw(18) << fSegmentName << " | "
-                 << setw(10) << boost::apply_visitor(SegmentSize{}, segments.at(id)) << " | "
-                 << setw(10) << boost::apply_visitor(SegmentFreeMemory{}, segments.at(id)) << " | "
+                 << setw(10) << boost::apply_visitor(SegmentSize{}, segments.at(0)) << " | "
+                 << setw(10) << boost::apply_visitor(SegmentFreeMemory{}, segments.at(0)) << " | "
                  << setw(8) << numDevices << " | "
 #ifdef FAIRMQ_DEBUG_MODE
                  << setw(8) << numMessages << " | "
@@ -347,8 +343,8 @@ void Monitor::CheckSegment()
                  << setw(10) << (fViewOnly ? "view only" : to_string(duration)) << " |"
                  << c << flush;
         } else if (fViewOnly) {
-            size_t free = boost::apply_visitor(SegmentFreeMemory{}, segments.at(id));
-            size_t total = boost::apply_visitor(SegmentSize{}, segments.at(id));
+            size_t free = boost::apply_visitor(SegmentFreeMemory{}, segments.at(0));
+            size_t total = boost::apply_visitor(SegmentSize{}, segments.at(0));
             size_t used = total - free;
             // size_t mfree = managementSegment.get_free_memory();
             // size_t mtotal = managementSegment.get_size();
@@ -528,52 +524,60 @@ std::vector<std::pair<std::string, bool>> Monitor::Cleanup(const ShmId& shmId, b
     string managementSegmentName("fmq_" + shmId.shmId + "_mng");
     try {
         bipc::managed_shared_memory managementSegment(bipc::open_only, managementSegmentName.c_str());
+        try {
             RegionCounter* rc = managementSegment.find<RegionCounter>(bipc::unique_instance).first;
             if (rc) {
                 if (verbose) {
                     cout << "Region counter found: " << rc->fCount << endl;
                 }
-                uint64_t regionCount = rc->fCount;
+                uint16_t regionCount = rc->fCount;

-                Uint64RegionInfoMap* m = managementSegment.find<Uint64RegionInfoMap>(bipc::unique_instance).first;
+                Uint16RegionInfoMap* m = managementSegment.find<Uint16RegionInfoMap>(bipc::unique_instance).first;

-                for (uint64_t i = 1; i <= regionCount; ++i) {
+                for (uint16_t i = 1; i <= regionCount; ++i) {
                     if (m != nullptr) {
                         RegionInfo ri = m->at(i);
                         string path = ri.fPath.c_str();
                         int flags = ri.fFlags;
                         if (verbose) {
                             cout << "Found RegionInfo with path: '" << path << "', flags: " << flags << ", fDestroyed: " << ri.fDestroyed << "." << endl;
                         }
                         if (path != "") {
                             result.emplace_back(RunRemoval(Monitor::RemoveFileMapping, path + "fmq_" + shmId.shmId + "_rg_" + to_string(i), verbose));
                         } else {
                             result.emplace_back(RunRemoval(Monitor::RemoveObject, "fmq_" + shmId.shmId + "_rg_" + to_string(i), verbose));
                         }
                     } else {
                         result.emplace_back(RunRemoval(Monitor::RemoveObject, "fmq_" + shmId.shmId + "_rg_" + to_string(i), verbose));
                     }

                     result.emplace_back(RunRemoval(Monitor::RemoveQueue, string("fmq_" + shmId.shmId + "_rgq_" + to_string(i)), verbose));
                 }
             } else {
                 if (verbose) {
                     cout << "No region counter found. No regions to cleanup." << endl;
                 }
             }
+        } catch(out_of_range& oor) {
+            if (verbose) {
+                cout << "Could not locate element in the region map, out of range: " << oor.what() << endl;
+            }
+        }
+
+        Uint16SegmentInfoHashMap* segmentInfos = managementSegment.find<Uint16SegmentInfoHashMap>(bipc::unique_instance).first;
+
+        for (const auto& s : *segmentInfos) {
+            result.emplace_back(RunRemoval(Monitor::RemoveObject, "fmq_" + shmId.shmId + "_m_" + to_string(s.first), verbose));
+        }
+
         result.emplace_back(RunRemoval(Monitor::RemoveObject, managementSegmentName.c_str(), verbose));
     } catch (bie&) {
         if (verbose) {
             cout << "Did not find '" << managementSegmentName << "' shared memory segment. No regions to cleanup." << endl;
         }
-    } catch(out_of_range& oor) {
-        if (verbose) {
-            cout << "Could not locate element in the region map, out of range: " << oor.what() << endl;
-        }
     }

-    result.emplace_back(RunRemoval(Monitor::RemoveObject, "fmq_" + shmId.shmId + "_main", verbose));
     result.emplace_back(RunRemoval(Monitor::RemoveMutex, "fmq_" + shmId.shmId + "_mtx", verbose));
     result.emplace_back(RunRemoval(Monitor::RemoveCondition, "fmq_" + shmId.shmId + "_cv", verbose));
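
For context: the RunRemoval(Monitor::Remove...) calls above presumably come down to the Boost.Interprocess removal functions for the respective named objects; this is an assumption about their implementation, not something shown in the diff. A hedged sketch of removing the per-segment and auxiliary names for one shm id (values invented):

    // Sketch: explicit removal of named shared memory objects, queues and mutexes.
    #include <boost/interprocess/shared_memory_object.hpp>
    #include <boost/interprocess/ipc/message_queue.hpp>
    #include <boost/interprocess/sync/named_mutex.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        namespace bipc = boost::interprocess;
        const std::string shmId = "4d2"; // example value

        bool segment = bipc::shared_memory_object::remove(("fmq_" + shmId + "_m_0").c_str());
        bool queue   = bipc::message_queue::remove(("fmq_" + shmId + "_cq").c_str());
        bool mutex   = bipc::named_mutex::remove(("fmq_" + shmId + "_mtx").c_str());

        std::cout << "removed segment: " << segment
                  << ", queue: " << queue << ", mutex: " << mutex << "\n";
        return 0;
    }
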
@@ -10,16 +10,16 @@ Devices track and cleanup shared memory on shutdown. For more information on the

 FairMQ Shared Memory currently uses the following names to register shared memory on the system:

 | name                        | info                                           | created by         | used by                        |
 | --------------------------- | ---------------------------------------------- | ------------------ | ------------------------------ |
-| `fmq_<shmId>_main`          | main segment (user data)                       | one of the devices | devices                        |
+| `fmq_<shmId>_m_<segmentId>` | managed segment(s) (user data)                 | one of the devices | devices                        |
 | `fmq_<shmId>_mng`           | management segment (management data)           | one of the devices | devices                        |
 | `fmq_<shmId>_mtx`           | mutex                                          | one of the devices | devices                        |
 | `fmq_<shmId>_cv`            | condition variable                             | one of the devices | devices with unmanaged regions |
 | `fmq_<shmId>_rg_<index>`    | unmanaged region(s)                            | one of the devices | devices with unmanaged regions |
 | `fmq_<shmId>_rgq_<index>`   | unmanaged region queue(s)                      | one of the devices | devices with unmanaged regions |
 | `fmq_<shmId>_ms`            | shmmonitor status                              | shmmonitor         | devices, shmmonitor            |
 | `fmq_<shmId>_cq`            | message queue between transport and shmmonitor | shmmonitor         | devices, shmmonitor            |

 The shmId is generated out of session id and user id.
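
A tiny sketch that assembles the names from the table above for one shmId (value invented), managed segment 0 and unmanaged region 1:

    // Sketch: composing the registered shared memory names for a given shm id.
    #include <iostream>
    #include <string>

    int main()
    {
        const std::string shmId = "4d2"; // generated from session id and user id

        const std::string segment    = "fmq_" + shmId + "_m_0";   // managed segment 0
        const std::string management = "fmq_" + shmId + "_mng";   // management segment
        const std::string region     = "fmq_" + shmId + "_rg_1";  // unmanaged region 1
        const std::string regionQ    = "fmq_" + shmId + "_rgq_1"; // unmanaged region queue 1

        std::cout << segment << "\n" << management << "\n"
                  << region << "\n" << regionQ << "\n";
        return 0;
    }
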
@@ -48,7 +48,7 @@ namespace shmem

 struct Region
 {
-    Region(const std::string& shmId, uint64_t id, uint64_t size, bool remote, RegionCallback callback, RegionBulkCallback bulkCallback, const std::string& path, int flags)
+    Region(const std::string& shmId, uint16_t id, uint64_t size, bool remote, RegionCallback callback, RegionBulkCallback bulkCallback, const std::string& path, int flags)
         : fRemote(remote)
         , fLinger(100)
         , fStop(false)
@@ -56,7 +56,7 @@ class UnmanagedRegion final : public fair::mq::UnmanagedRegion

     void* GetData() const override { return fRegion->get_address(); }
     size_t GetSize() const override { return fRegion->get_size(); }
-    uint64_t GetId() const override { return fRegionId; }
+    uint16_t GetId() const override { return fRegionId; }
     void SetLinger(uint32_t linger) override { fManager.GetRegion(fRegionId)->SetLinger(linger); }
     uint32_t GetLinger() const override { return fManager.GetRegion(fRegionId)->GetLinger(); }

@@ -65,7 +65,7 @@ class UnmanagedRegion final : public fair::mq::UnmanagedRegion
   private:
     Manager& fManager;
     boost::interprocess::mapped_region* fRegion;
-    uint64_t fRegionId;
+    uint16_t fRegionId;
 };

 }
@@ -114,13 +114,13 @@ class Context
         return fRegionInfos;
     }

-    uint64_t RegionCount() const
+    uint16_t RegionCount() const
     {
         std::lock_guard<std::mutex> lock(fMtx);
         return fRegionCounter;
     }

-    void AddRegion(bool managed, uint64_t id, void* ptr, size_t size, int64_t userFlags, RegionEvent event)
+    void AddRegion(bool managed, uint16_t id, void* ptr, size_t size, int64_t userFlags, RegionEvent event)
     {
         {
             std::lock_guard<std::mutex> lock(fMtx);
@@ -131,7 +131,7 @@ class Context
         fRegionEventsCV.notify_one();
     }

-    void RemoveRegion(uint64_t id)
+    void RemoveRegion(uint16_t id)
     {
         {
             std::lock_guard<std::mutex> lock(fMtx);
@@ -182,7 +182,7 @@ class Context
     mutable std::mutex fMtx;
     std::atomic<bool> fInterrupted;

-    uint64_t fRegionCounter;
+    uint16_t fRegionCounter;
     std::condition_variable fRegionEventsCV;
     std::vector<RegionInfo> fRegionInfos;
     std::queue<RegionInfo> fRegionEvents;
@@ -50,7 +50,7 @@ class UnmanagedRegion final : public fair::mq::UnmanagedRegion

     virtual void* GetData() const override { return fBuffer; }
     virtual size_t GetSize() const override { return fSize; }
-    uint64_t GetId() const override { return fId; }
+    uint16_t GetId() const override { return fId; }
     int64_t GetUserFlags() const { return fUserFlags; }
     void SetLinger(uint32_t /* linger */) override { LOG(debug) << "ZeroMQ UnmanagedRegion linger option not implemented. Acknowledgements are local."; }
     uint32_t GetLinger() const override { LOG(debug) << "ZeroMQ UnmanagedRegion linger option not implemented. Acknowledgements are local."; return 0; }
@@ -64,7 +64,7 @@ class UnmanagedRegion final : public fair::mq::UnmanagedRegion

   private:
     Context& fCtx;
-    uint64_t fId;
+    uint16_t fId;
     void* fBuffer;
     size_t fSize;
     int64_t fUserFlags;