11 #include <condition_variable> 111 }
while (!
threadInfos.compare_exchange_weak(head,
this));
128 LOCK(csThreadInfosDelete);
130 std::atomic<RCUInfos *> *ptr;
137 if (current ==
this) {
141 assert(current !=
nullptr);
142 ptr = ¤t->
next;
154 if (!ptr->compare_exchange_strong(current,
next.load())) {
185 static std::condition_variable cond;
192 }
while (!cond.wait_for(lock, std::chrono::microseconds(1), [&] {
193 return cleanups.empty() && hasSyncedTo(syncRev);
208 while (it !=
cleanups.end() && it->first <= syncedTo) {
217 uint64_t syncedTo =
revision.load();
223 while (current !=
nullptr) {
224 syncedTo = std::min(syncedTo, current->
state.load());
225 if (syncedTo < cutoff) {
229 current = current->
next.load();
static constexpr int RCU_ACTIVE_LOOP_COUNT
How many times a busy loop runs before yielding.
static thread_local RCUInfos infos
static RecursiveMutex csThreadInfosDelete
uint64_t hasSyncedTo(uint64_t cutoff=UNLOCKED)
static std::atomic< RCUInfos * > threadInfos
We maintain a linked list of all the RCUInfos for each active thread.
std::map< uint64_t, std::function< void()> > cleanups
static std::atomic< uint64_t > revision
#define WAIT_LOCK(cs, name)
std::atomic< RCUInfos * > next
std::atomic< uint64_t > state