 #include <atomic>
 
 #include "absl/base/internal/raw_logging.h"  // For ABSL_RAW_CHECK
-#include "absl/base/internal/spinlock.h"
+#include "absl/synchronization/mutex.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace cord_internal {
 
-using ::absl::base_internal::SpinLockHolder;
+namespace {
 
-ABSL_CONST_INIT CordzHandle::Queue CordzHandle::global_queue_(absl::kConstInit);
+struct Queue {
+  Queue() = default;
+
+  absl::Mutex mutex;
+  std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
+
+  // Returns true if this delete queue is empty. This method does not acquire
+  // the lock, but does a 'load acquire' observation on the delete queue tail.
+  // It is used inside Delete() to check for the presence of a delete queue
+  // without holding the lock. The assumption is that the caller is in the
+  // state of 'being deleted', and can not be newly discovered by a concurrent
+  // 'being constructed' snapshot instance. Practically, this means that any
+  // such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
+  // before / after' semantics and atomic fences.
+  bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+    return dq_tail.load(std::memory_order_acquire) == nullptr;
+  }
+};
+
+static Queue* GlobalQueue() {
+  static Queue* global_queue = new Queue;
+  return global_queue;
+}
+
+}  // namespace
 
 CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
+  Queue* global_queue = GlobalQueue();
   if (is_snapshot) {
-    SpinLockHolder lock(&queue_->mutex);
-    CordzHandle* dq_tail = queue_->dq_tail.load(std::memory_order_acquire);
+    MutexLock lock(&global_queue->mutex);
+    CordzHandle* dq_tail =
+        global_queue->dq_tail.load(std::memory_order_acquire);
     if (dq_tail != nullptr) {
       dq_prev_ = dq_tail;
       dq_tail->dq_next_ = this;
     }
-    queue_->dq_tail.store(this, std::memory_order_release);
+    global_queue->dq_tail.store(this, std::memory_order_release);
   }
 }
 
 CordzHandle::~CordzHandle() {
-  ODRCheck();
+  Queue* global_queue = GlobalQueue();
   if (is_snapshot_) {
     std::vector<CordzHandle*> to_delete;
     {
-      SpinLockHolder lock(&queue_->mutex);
+      MutexLock lock(&global_queue->mutex);
       CordzHandle* next = dq_next_;
       if (dq_prev_ == nullptr) {
         // We were head of the queue, delete every CordzHandle until we reach
@@ -59,7 +85,7 @@ CordzHandle::~CordzHandle() {
       if (next) {
         next->dq_prev_ = dq_prev_;
       } else {
-        queue_->dq_tail.store(dq_prev_, std::memory_order_release);
+        global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
       }
     }
     for (CordzHandle* handle : to_delete) {
@@ -69,16 +95,15 @@ CordzHandle::~CordzHandle() {
 }
 
 bool CordzHandle::SafeToDelete() const {
-  return is_snapshot_ || queue_->IsEmpty();
+  return is_snapshot_ || GlobalQueue()->IsEmpty();
 }
 
 void CordzHandle::Delete(CordzHandle* handle) {
   assert(handle);
   if (handle) {
-    handle->ODRCheck();
-    Queue* const queue = handle->queue_;
+    Queue* const queue = GlobalQueue();
     if (!handle->SafeToDelete()) {
-      SpinLockHolder lock(&queue->mutex);
+      MutexLock lock(&queue->mutex);
       CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
       if (dq_tail != nullptr) {
         handle->dq_prev_ = dq_tail;
@@ -93,8 +118,9 @@ void CordzHandle::Delete(CordzHandle* handle) {
 
 std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
   std::vector<const CordzHandle*> handles;
-  SpinLockHolder lock(&global_queue_.mutex);
-  CordzHandle* dq_tail = global_queue_.dq_tail.load(std::memory_order_acquire);
+  Queue* global_queue = GlobalQueue();
+  MutexLock lock(&global_queue->mutex);
+  CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
   for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
     handles.push_back(p);
   }
@@ -103,13 +129,13 @@ std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
 
 bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
     const CordzHandle* handle) const {
-  ODRCheck();
   if (!is_snapshot_) return false;
   if (handle == nullptr) return true;
   if (handle->is_snapshot_) return false;
   bool snapshot_found = false;
-  SpinLockHolder lock(&queue_->mutex);
-  for (const CordzHandle* p = queue_->dq_tail; p; p = p->dq_prev_) {
+  Queue* global_queue = GlobalQueue();
+  MutexLock lock(&global_queue->mutex);
+  for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
     if (p == handle) return !snapshot_found;
     if (p == this) snapshot_found = true;
   }
@@ -119,13 +145,13 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
 
 std::vector<const CordzHandle*>
 CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
-  ODRCheck();
   std::vector<const CordzHandle*> handles;
   if (!is_snapshot()) {
    return handles;
  }
 
-  SpinLockHolder lock(&queue_->mutex);
+  Queue* global_queue = GlobalQueue();
+  MutexLock lock(&global_queue->mutex);
   for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
     if (!p->is_snapshot()) {
       handles.push_back(p);
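For reference, below is a minimal, self-contained sketch of the pattern this change adopts: the global delete queue is constructed lazily through a function-local static pointer that is allocated once and intentionally never freed, writers serialize on an absl::Mutex, and the lock-free emptiness probe pairs an acquire load with the release store on the tail pointer. This is illustrative only; the names Node, DeleteQueue, GlobalDeleteQueue, and PushBack are hypothetical and not part of the Abseil API.

// Illustrative sketch only, not Abseil code. It mirrors the shape of the
// change: a leaky, lazily constructed global guarded by absl::Mutex, plus a
// lock-free emptiness check using an acquire load on the tail pointer.
#include <atomic>
#include <cassert>

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

// Hypothetical queue node; CordzHandle plays this role in the real code.
struct Node {
  Node* prev = nullptr;
};

struct DeleteQueue {
  absl::Mutex mutex;
  std::atomic<Node*> tail ABSL_GUARDED_BY(mutex){nullptr};

  // Lock-free probe; pairs with the release store in PushBack().
  bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
    return tail.load(std::memory_order_acquire) == nullptr;
  }
};

// Lazily constructed and intentionally leaked, so the queue never runs a
// destructor and stays usable while other static objects are torn down.
DeleteQueue* GlobalDeleteQueue() {
  static DeleteQueue* queue = new DeleteQueue;
  return queue;
}

void PushBack(Node* node) {
  DeleteQueue* queue = GlobalDeleteQueue();
  absl::MutexLock lock(&queue->mutex);
  node->prev = queue->tail.load(std::memory_order_acquire);
  queue->tail.store(node, std::memory_order_release);
}

int main() {
  assert(GlobalDeleteQueue()->IsEmpty());
  Node node;
  PushBack(&node);
  assert(!GlobalDeleteQueue()->IsEmpty());
  return 0;
}

A likely reading of the design choice (my assumption, not stated in the commit): replacing the ABSL_CONST_INIT global with a leaked function-local static removes the need for a constant-initializable queue type, and never destroying the queue keeps it valid for CordzHandle objects deleted during static destruction.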