Liste des Groupes | Revenir à cl c++ |
On 26.09.2024 08:49, Chris M. Thomasson wrote:^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^On 9/17/2024 2:22 AM, Paavo Helde wrote:On 17.09.2024 09:04, Chris M. Thomasson wrote:On 9/16/2024 10:59 PM, Chris M. Thomasson wrote:>On 9/16/2024 10:54 PM, Paavo Helde wrote:[...]>template<typename T>
class CachedAtomicPtr {
public:
CachedAtomicPtr(): ptr_(nullptr) {}
>
/// Store p in *this if *this is not yet assigned.
/// Return pointer stored in *this, which can be \a p or not.
Ptr<T> AssignIfNull(Ptr<T> p) {
const T* other = nullptr;
if (ptr_.compare_exchange_weak(other, p.get(), std::memory_order_release, std::memory_order_acquire)) {
p->IncrementRefcount();
return p;
} else {
// wrap in an extra smartptr (increments refcount)
return Ptr<T>(other);
}
^^^^^^^^^^^^^^^^^^
>
Is Ptr<T> an intrusive reference count? I assume it is.
Yes. Otherwise I could not generate new smartpointers from bare T*.
>
>
FYI, here is my current full compilable code together with a test harness (no Relacy — I could not get it working — so this just creates a number of threads which use the CachedAtomicPtr objects in parallel).
>
#include <cstddef>
#include <atomic>
#include <iostream>
#include <stdexcept>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>
>
/// debug instrumentation
std::atomic<int> gAcount = 0, gBcount = 0, gCASFailureCount = 0;
/// program exit code
std::atomic<int> exitCode = EXIT_SUCCESS;
>
/// Test-harness assertion helper: throws std::logic_error when the
/// condition is false (used instead of assert() so failures are visible
/// in release builds and can be caught by the harness).
void Assert(bool x) {
    if (x) {
        return;
    }
    throw std::logic_error("Assert failed");
}
>
/// Base class providing an intrusive, atomic reference count.
/// Objects start with a count of zero; smart pointers (Ptr<T>) call
/// Capture()/Release() to add and drop references. Copy construction
/// deliberately starts the copy at zero references (the count belongs to
/// the object's identity, not its value); move and assignment are deleted
/// for the same reason.
class RefCountedBase {
public:
    RefCountedBase(): refcount_(0) {}
    RefCountedBase(const RefCountedBase&): refcount_(0) {}
    RefCountedBase(RefCountedBase&&) = delete;
    RefCountedBase& operator=(const RefCountedBase&) = delete;
    RefCountedBase& operator=(RefCountedBase&&) = delete;

    /// Add one reference. const + mutable counter so that pointees held
    /// through pointer-to-const can still be shared.
    void Capture() const noexcept {
        refcount_.fetch_add(1);
    }

    /// Drop one reference; destroys the object when the last one is gone.
    void Release() const noexcept {
        // fetch_sub returns the PREVIOUS value, so 1 means this call
        // removed the final reference.
        if (refcount_.fetch_sub(1) == 1) {
            delete const_cast<RefCountedBase*>(this);
        }
    }

    // Virtual destructor: deletion happens through the base pointer above.
    virtual ~RefCountedBase() {}

private:
    mutable std::atomic<std::size_t> refcount_;
};
>
/// Intrusive smart pointer to const T. T must provide Capture()/Release()
/// (e.g. by deriving from RefCountedBase). Copying bumps the refcount,
/// destruction drops it; moves transfer the reference without touching
/// the count.
///
/// Fixes relative to the posted version:
///  - repaired the garbled "ptr_- >Capture()" token in the explicit
///    constructor (whitespace inside '->' made it a compile error);
///  - added a self-move guard to move assignment: without it,
///    p = std::move(p) released the held reference and then read the
///    pointer back, leaving the count one too low;
///  - dropped the meaningless 'mutable' on ptr_.
template<class T>
class Ptr {
public:
    Ptr(): ptr_(nullptr) {}
    /// Adopt a bare pointer, taking one reference. Explicit so that
    /// wrapping a raw pointer is a visible decision at the call site.
    explicit Ptr(const T* ptr): ptr_(ptr) { if (ptr_) { ptr_->Capture(); } }
    Ptr(const Ptr& b): ptr_(b.ptr_) { if (ptr_) { ptr_->Capture(); } }
    /// Move: steal the reference, leave the source empty.
    Ptr(Ptr&& b) noexcept: ptr_(b.ptr_) { b.ptr_ = nullptr; }
    ~Ptr() { if (ptr_) { ptr_->Release(); } }
    Ptr& operator=(const Ptr& b) {
        // Capture before Release so self-assignment cannot drop the
        // last reference before re-taking it.
        if (b.ptr_) { b.ptr_->Capture(); }
        if (ptr_) { ptr_->Release(); }
        ptr_ = b.ptr_;
        return *this;
    }
    Ptr& operator=(Ptr&& b) noexcept {
        // Self-move guard: otherwise we would Release() our only
        // reference and then copy the now-stale pointer back.
        if (this != &b) {
            if (ptr_) { ptr_->Release(); }
            ptr_ = b.ptr_;
            b.ptr_ = nullptr;
        }
        return *this;
    }
    const T* operator->() const { return ptr_; }
    const T& operator*() const { return *ptr_; }
    explicit operator bool() const { return ptr_ != nullptr; }
    const T* get() const { return ptr_; }
private:
    const T* ptr_;
};
>
template<typename T>
class CachedAtomicPtr {
public:
CachedAtomicPtr(): ptr_(nullptr) {}
/// Store p in *this if *this is not yet assigned.
/// Return pointer stored in *this, which can be \a p or not.
Ptr<T> AssignIfNull(Ptr<T> p) {
const T* other = nullptr;
if (ptr_.compare_exchange_strong(other, p.get(), std::memory_order_release, std::memory_order_acquire)) {
p->Capture();
[...]>Yes, that's the idea. The first thread which manages to install non-null pointer will increase the refcount, others will fail and their objects will be released when refcounts drop to zero.
Only one thread should ever get here, right? It just installed the pointer p.get() into ptr_, right?
Les messages affichés proviennent d'usenet.