Re: Atomic caching of smart pointers

Liste des GroupesRevenir à cl c++ 
Sujet : Re: Atomic caching of smart pointers
De : eesnimi (at) *nospam* osa.pri.ee (Paavo Helde)
Groupes : comp.lang.c++
Date : 17. Sep 2024, 10:22:34
Autres entêtes
Organisation : A noiseless patient Spider
Message-ID : <vcbhoq$3epub$1@dont-email.me>
References : 1 2 3 4 5 6 7
User-Agent : Mozilla Thunderbird
On 17.09.2024 09:04, Chris M. Thomasson wrote:
On 9/16/2024 10:59 PM, Chris M. Thomasson wrote:
On 9/16/2024 10:54 PM, Paavo Helde wrote:
[...]
template<typename T>
class CachedAtomicPtr {
public:
     CachedAtomicPtr(): ptr_(nullptr) {}
>
     /// Store p in *this if *this is not yet assigned.
     /// Return pointer stored in *this, which can be \a p or not.
     Ptr<T> AssignIfNull(Ptr<T> p) {
         const T* other = nullptr;
         if (ptr_.compare_exchange_weak(other, p.get(), std::memory_order_release, std::memory_order_acquire)) {
             p->IncrementRefcount();
             return p;
         } else {
             // wrap in an extra smartptr (increments refcount)
             return Ptr<T>(other);
         }
 ^^^^^^^^^^^^^^^^^^
 Is Ptr<T> an intrusive reference count? I assume it is.
Yes. Otherwise I could not generate new smartpointers from bare T*.
FYI, here is my current full compilable code together with a test harness (no Relacy — I could not get it working), so this just creates a number of threads which make use of the CachedAtomicPtr objects in parallel.
#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cstdlib>
#include <deque>
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <thread>
#include <typeinfo>
#include <utility>
#include <vector>
/// debug instrumentation
// Live-object counters for A and B (incremented in their ctors, decremented
// in their dtors) plus a counter of lost CAS races in
// CachedAtomicPtr::AssignIfNull; main() uses them to detect leaks,
// double destroys, and whether the race path was exercised at all.
std::atomic<int> gAcount = 0, gBcount = 0, gCASFailureCount = 0;
/// program exit code
// Set to EXIT_FAILURE by any thread that catches an exception; returned
// from main() at the end of the run.
std::atomic<int> exitCode = EXIT_SUCCESS;
/// Runtime check used by the test harness: throws std::logic_error when the
/// condition does not hold, so failures surface through the normal
/// exception-handling path instead of aborting the process.
void Assert(bool x) {
if (x) {
return;
}
throw std::logic_error("Assert failed");
}
/// Base class providing an intrusive, atomic reference count.
/// Objects start with a count of zero; smart pointers call Capture() to add
/// a reference and Release() to drop one. The last Release() deletes the
/// object, so instances must live on the heap once refcounted.
class RefCountedBase {
public:
RefCountedBase(): refcount_(0) {}
// A copy is a brand-new object with no owners of its own yet.
RefCountedBase(const RefCountedBase&): refcount_(0) {}
RefCountedBase(RefCountedBase&&) = delete;
RefCountedBase& operator=(const RefCountedBase&) = delete;
RefCountedBase& operator=(RefCountedBase&&) = delete;
/// Add one reference (const so that Ptr<const T> works).
void Capture() const noexcept {
refcount_.fetch_add(1);
}
/// Drop one reference; deletes the object when the last one goes away.
void Release() const noexcept {
if (refcount_.fetch_sub(1) == 1) {
delete const_cast<RefCountedBase*>(this);
}
}
virtual ~RefCountedBase() {}

private:
// mutable: Capture/Release are conceptually const operations.
mutable std::atomic<std::size_t> refcount_;
};
/// Intrusive-refcount smart pointer to a const T.
/// T must provide const member functions Capture() (add a reference) and
/// Release() (drop one; the last drop destroys the object), as
/// RefCountedBase does.
template<class T>
class Ptr {
public:
/// Null pointer.
Ptr(): ptr_(nullptr) {}
/// Wrap a raw pointer (may be null), taking one reference.
explicit Ptr(const T* ptr): ptr_(ptr) { if (ptr_) { ptr_->Capture(); } }
/// Copy: share ownership, adds one reference.
Ptr(const Ptr& b): ptr_(b.ptr_) { if (ptr_) { ptr_->Capture(); } }
/// Move: steal the reference; the source becomes null.
Ptr(Ptr&& b) noexcept: ptr_(b.ptr_) { b.ptr_ = nullptr; }
~Ptr() { if (ptr_) { ptr_->Release(); } }
Ptr& operator=(const Ptr& b) {
// Capture before Release so self-assignment cannot drop the last reference.
if (b.ptr_) { b.ptr_->Capture(); }
if (ptr_) { ptr_->Release(); }
ptr_ = b.ptr_;
return *this;
}
Ptr& operator=(Ptr&& b) noexcept {
// BUG FIX: guard against self-move. The original released its own
// reference first, so `p = std::move(p)` dropped the reference (and
// could destroy the object) before nulling the pointer.
if (this != &b) {
if (ptr_) { ptr_->Release(); }
ptr_ = b.ptr_;
b.ptr_ = nullptr;
}
return *this;
}
const T* operator->() const { return ptr_; }
const T& operator*() const { return *ptr_; }
explicit operator bool() const { return ptr_!=nullptr; }
const T* get() const { return ptr_; }
private:
// 'mutable' removed from the original: no const member function modifies ptr_.
const T* ptr_;
};
/// One-shot, lock-free cache of an intrusively refcounted pointer.
/// The slot starts null; the first successful AssignIfNull() publishes a
/// pointer that then stays fixed for the lifetime of this object, so Load()
/// can hand out new smart pointers without locking. The cache itself holds
/// one reference to the stored object (taken in AssignIfNull, dropped in
/// the destructor).
template<typename T>
class CachedAtomicPtr {
public:
CachedAtomicPtr(): ptr_(nullptr) {}
/// Store p in *this if *this is not yet assigned.
/// Return pointer stored in *this, which can be \a p or not.
Ptr<T> AssignIfNull(Ptr<T> p) {
const T* other = nullptr;
// Success publishes p.get() with release ordering; on failure 'other'
// receives the already-cached pointer, read with acquire ordering.
if (ptr_.compare_exchange_strong(other, p.get(), std::memory_order_release, std::memory_order_acquire)) {
// Take the cache's own reference. Doing this after publication is
// safe here because the caller's by-value 'p' keeps the refcount
// above zero for the whole call.
p->Capture();
return p;
} else {
// Lost the race: count it (test instrumentation) and hand back the
// winner's pointer wrapped in a new smartptr (increments refcount).
++gCASFailureCount;
return Ptr<T>(other);
}
}
/// Return the cached pointer (null Ptr if nothing was assigned yet).
/// The Ptr constructor increments the refcount; while *this is alive it
/// holds one reference itself, so the pointee cannot disappear between
/// the atomic load and the Capture.
Ptr<T> Load() const {
return Ptr<T>(ptr_);
}
~CachedAtomicPtr() {
// Drop the reference taken in AssignIfNull, if any.
if (const T* ptr = ptr_) {
ptr->Release();
}
}

/// A copy is made for modifying the enclosing object, which would make the cached value wrong, so we reset it to null on copying.
CachedAtomicPtr(const CachedAtomicPtr& other): ptr_(nullptr) {}
/// Any standard operations modifying ptr_ during the object lifetime would create data races and need to be forbidden.
CachedAtomicPtr(CachedAtomicPtr&&) = delete;
CachedAtomicPtr& operator=(const CachedAtomicPtr&) = delete;
CachedAtomicPtr& operator=(CachedAtomicPtr&&) = delete;
private:
std::atomic<const T*> ptr_;
};
class B: public RefCountedBase {
public:
static Ptr<B> Make(int data) {
return Ptr<B>(new B(data));
}
int Data() const {
return data_;
}
Ptr<B> Clone() const {
return Ptr<B>(new B(*this));
}
private:
B(int data): data_(data) { ++gBcount; }
B(const B& other): data_(other.data_) { ++gBcount; }
~B() { data_ = -1;  Assert(typeid(*this) == typeid(B)); --gBcount; }
private:
int data_;
};
/// Refcounted test payload owning a lazily computed, atomically cached B.
/// Live instances are counted in gAcount.
class A: public RefCountedBase {
public:
static Ptr<A> Make(int data) {
Ptr<A> made(new A(data));
return made;
}
/// Return the cached B, computing and publishing it on first use.
/// Concurrent callers may each compute a candidate; all end up with the
/// single pointer that won the publication race.
Ptr<B> GetOrCalcB() const {
Ptr<B> result = b_.Load();
if (!result) {
result = b_.AssignIfNull(CalcB());
}
Assert(static_cast<bool>(result));
return result;
}
int Data() const { return data_; }
/// Deep copy; the copy's B cache starts empty (see CachedAtomicPtr).
Ptr<A> Clone() const {
Ptr<A> copy(new A(*this));
return copy;
}
private:
A(int data): data_(data) { gAcount.fetch_add(1); }
A(const A& src): data_(src.data_) { gAcount.fetch_add(1); }
~A() { gAcount.fetch_sub(1); }
/// The "expensive" computation being cached: a B holding twice our data.
Ptr<B> CalcB() const {
return B::Make(2 * data_);
}
private:
int data_;
mutable CachedAtomicPtr<B> b_;
};
// Test harness
/// Unbounded thread-safe FIFO used to feed Ptr<A> work items to the test
/// threads. A default-constructed (null) Ptr<A> serves as the callers'
/// termination sentinel.
class Queue {
public:
/// Enqueue one element and wake all waiting consumers.
void Push(Ptr<A> a) {
{
std::unique_lock<std::mutex> lock{ mutex_ };
// Move instead of copy: avoids an extra refcount increment/decrement.
deque_.push_back(std::move(a));
}
// Notify outside the lock so woken threads can take it immediately.
cond_.notify_all();
}
/// Block until an element is available, then dequeue and return it.
Ptr<A> Pop() {
std::unique_lock<std::mutex> lock{ mutex_ };
cond_.wait(lock, [this]() {return !deque_.empty(); });
// Move out of the front slot; the element is popped right after, so
// the copy (and its refcount round-trip) in the original was wasted.
Ptr<A> a = std::move(deque_.front());
deque_.pop_front();
return a;
}
private:
std::mutex mutex_;
std::condition_variable cond_;
std::deque<Ptr<A>> deque_;
};
/// Verify that the cached B outlives the A it was computed from:
/// obtain B, destroy the A, then read B.
void Test1(Ptr<A>&& a) {
const int m = a->Data();
Ptr<B> b = a->GetOrCalcB();
// Destroy a; b must remain alive on its own reference.
a = Ptr<A>();
Assert(b->Data() == 2 * m);
}
/// Exercise cloning: a clone carries the same data but an empty B cache, and
/// every B obtained directly, from clones, or by copying/cloning B itself
/// must hold twice the original data.
void Test2(Ptr<A>&& a) {
int m = a->Data();
Ptr<A> a2 = a->Clone();
int m2 = a2->Data();
Assert(m == m2);
Ptr<B> b = a->GetOrCalcB();
Ptr<B> b2 = a2->GetOrCalcB();
Ptr<B> b3 = a->Clone()->GetOrCalcB();
a = Ptr<A>();
Ptr<B> b4 = b;
Ptr<B> b5 = b->Clone();
int k = b->Data();
Assert(k == 2 * m);
// BUG FIX: the original asserted 'k' in all five checks, so the values
// read from b2..b5 (k2..k5) were never actually verified.
int k2 = b2->Data();
Assert(k2 == 2 * m);
int k3 = b3->Data();
Assert(k3 == 2 * m);
int k4 = b4->Data();
Assert(k4 == 2 * m);
int k5 = b5->Data();
Assert(k5 == 2 * m);
}
/// Test driver: one producer (this thread) fans每 each A out to N consumer
/// queues; N worker threads alternate Test1/Test2 on the items until they
/// receive a null sentinel. Afterwards the leak counters and the CAS-failure
/// counter are checked.
int main() {
try {
/// Number of parallel threads
int N = 16;
/// Number of iterations
int n = 1000000;
std::vector<Queue> queues(N);
std::vector<std::thread> threads;
// Start one consumer per queue. Each loop iteration pops two items:
// one for Test1 and one for Test2; a null Ptr terminates the thread.
for (int i = 0; i < N; ++i) {
Queue& queue = queues[i];
threads.emplace_back([&queue]() {
try {
while (true) {
Ptr<A> a = queue.Pop();
if (!a) {
break;
}
Test1(std::move(a));
a = queue.Pop();
if (!a) {
break;
}
Test2(std::move(a));
}
} catch (const std::exception& e) {
// Report and record failure; the thread then exits cleanly.
std::cerr << "Exception: " << e.what() << "\n";
exitCode = EXIT_FAILURE;
}
}
);
}
// Producer loop: the same Ptr<A> is pushed to every queue, so all N
// threads race on the one shared CachedAtomicPtr inside it.
// NOTE(review): the progress computation divides by n/100, which would
// divide by zero if n were set below 100.
for (int i = 0; i < n; ++i) {
if (i % (n / 100) == 0) {
std::cout << i / (n / 100) << "%\n";
}
Ptr<A> a = A::Make(i);
for (int j = 0; j < N; ++j) {
queues[j].Push(a);
}
}
// One null sentinel per queue stops each consumer at its next Pop.
for (int j = 0; j < N; ++j) {
queues[j].Push(Ptr<A>());
}
for (int j = 0; j < N; ++j) {
threads[j].join();
}
// Both counters must be back to zero: anything else means a leak or a
// double destroy somewhere in the refcounting.
std::cout << "n = " << n << "\n";
std::cout << "Leaked gAcount = " << gAcount << "\n";
std::cout << "Leaked gBcount = " << gBcount << "\n";
std::cout << "gCASFailureCount = " << gCASFailureCount << " (" << (gCASFailureCount * 100.0 / (n * N)) << "%)\n";
if (gAcount || gBcount) {
std::cerr << "Error: there were leaks or double destroys.\n";
exitCode = EXIT_FAILURE;
}
if (gCASFailureCount == 0) {
std::cerr << "Warning: no CAS failures, so could not test the code fully (single-core machine?)\n";
}
} catch (const std::exception& e) {
std::cerr << "Exception: " << e.what() << "\n";
exitCode = EXIT_FAILURE;
}
return exitCode;
}

Date Sujet#  Auteur
15 Sep 24 * Atomic caching of smart pointers30Paavo Helde
15 Sep 24 +* Re: Atomic caching of smart pointers17Chris M. Thomasson
16 Sep 24 i`* Re: Atomic caching of smart pointers16Paavo Helde
16 Sep 24 i `* Re: Atomic caching of smart pointers15Chris M. Thomasson
16 Sep 24 i  +* Re: Atomic caching of smart pointers2Chris M. Thomasson
17 Sep 24 i  i`- Re: Atomic caching of smart pointers1Paavo Helde
17 Sep 24 i  `* Re: Atomic caching of smart pointers12Paavo Helde
17 Sep 24 i   `* Re: Atomic caching of smart pointers11Chris M. Thomasson
17 Sep 24 i    `* Re: Atomic caching of smart pointers10Chris M. Thomasson
17 Sep 24 i     `* Re: Atomic caching of smart pointers9Paavo Helde
17 Sep 24 i      +- Re: Atomic caching of smart pointers1Chris M. Thomasson
26 Sep 24 i      `* Re: Atomic caching of smart pointers7Chris M. Thomasson
26 Sep 24 i       `* Re: Atomic caching of smart pointers6Paavo Helde
27 Sep 24 i        `* Re: Atomic caching of smart pointers5Chris M. Thomasson
27 Sep 24 i         +* Re: Atomic caching of smart pointers2Chris M. Thomasson
28 Sep 24 i         i`- Re: Atomic caching of smart pointers1Paavo Helde
28 Sep 24 i         `* Re: Atomic caching of smart pointers2Paavo Helde
29 Sep 24 i          `- Re: Atomic caching of smart pointers1Chris M. Thomasson
15 Sep 24 +* Re: Atomic caching of smart pointers8Chris M. Thomasson
16 Sep 24 i`* Re: Atomic caching of smart pointers7Paavo Helde
16 Sep 24 i +* Re: Atomic caching of smart pointers2Bonita Montero
16 Sep 24 i i`- Re: Atomic caching of smart pointers1Chris M. Thomasson
16 Sep 24 i +* Re: Atomic caching of smart pointers2Marcel Mueller
16 Sep 24 i i`- Re: Atomic caching of smart pointers1Chris M. Thomasson
16 Sep 24 i +- Re: Atomic caching of smart pointers1Bonita Montero
16 Sep 24 i `- Re: Atomic caching of smart pointers1Chris M. Thomasson
16 Sep 24 +* Re: Atomic caching of smart pointers2Muttley
16 Sep 24 i`- Re: Atomic caching of smart pointers1Paavo Helde
16 Sep 24 `* Re: Atomic caching of smart pointers2Bonita Montero
16 Sep 24  `- Re: Atomic caching of smart pointers1Chris M. Thomasson

Haut de la page

Les messages affichés proviennent d'usenet.

NewsPortal