include/boost/corosio/native/detail/reactor/reactor_descriptor_state.hpp

//
// Copyright (c) 2026 Steve Gerbino
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/cppalliance/corosio
//

#ifndef BOOST_COROSIO_NATIVE_DETAIL_REACTOR_REACTOR_DESCRIPTOR_STATE_HPP
#define BOOST_COROSIO_NATIVE_DETAIL_REACTOR_REACTOR_DESCRIPTOR_STATE_HPP

#include <boost/corosio/native/detail/reactor/reactor_op_base.hpp>
#include <boost/corosio/native/detail/reactor/reactor_scheduler.hpp>

#include <boost/corosio/detail/conditionally_enabled_mutex.hpp>

#include <atomic>
#include <cstdint>
#include <memory>
#include <utility> // std::exchange, used in invoke_deferred_io()

#include <errno.h>
#include <sys/socket.h>
namespace boost::corosio::detail {

/// Shared reactor event constants.
/// These use the epoll numeric values; the kqueue backend maps its
/// events onto the same bits.
static constexpr std::uint32_t reactor_event_read = 0x001;
static constexpr std::uint32_t reactor_event_write = 0x004;
static constexpr std::uint32_t reactor_event_error = 0x008;

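// Illustrative cross-check (editor's sketch, not in the original header):
// on Linux these values coincide with the epoll event flags, so an epoll
// backend can pass kernel event masks through unchanged:
//
//   static_assert(reactor_event_read  == EPOLLIN);   // EPOLLIN  == 0x001
//   static_assert(reactor_event_write == EPOLLOUT);  // EPOLLOUT == 0x004
//   static_assert(reactor_event_error == EPOLLERR);  // EPOLLERR == 0x008
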
/** Per-descriptor state shared across reactor backends.

    Tracks the pending operations for one file descriptor. The fd is
    registered with the reactor once and stays registered until it is
    closed. I/O is deferred: the reactor sets ready_events_ atomically,
    then enqueues this state with the scheduler. When the scheduler pops
    it, invoke_deferred_io() performs the I/O under the mutex and queues
    the completed ops. (See the usage sketches below.)

    Non-template: it uses reactor_op_base pointers, so the scheduler and
    descriptor-state code exist as a single copy in the binary regardless
    of how many backends are compiled in.

    @par Thread Safety
    The mutex protects the operation pointers and ready/cancel flags;
    ready_events_ and is_enqueued_ are atomic for lock-free access from
    the reactor thread.
*/
struct reactor_descriptor_state : scheduler_op
{
    /// Protects operation pointers and ready/cancel flags.
    /// Becomes a no-op in single-threaded mode.
    conditionally_enabled_mutex mutex{true};

    /// Pending read operation (guarded by `mutex`).
    reactor_op_base* read_op = nullptr;

    /// Pending write operation (guarded by `mutex`).
    reactor_op_base* write_op = nullptr;

    /// Pending connect operation (guarded by `mutex`).
    reactor_op_base* connect_op = nullptr;

    /// True if a read edge event arrived before an op was registered.
    bool read_ready = false;

    /// True if a write edge event arrived before an op was registered.
    bool write_ready = false;

    /// Deferred read cancellation (IOCP-style cancel semantics).
    bool read_cancel_pending = false;

    /// Deferred write cancellation (IOCP-style cancel semantics).
    bool write_cancel_pending = false;

    /// Deferred connect cancellation (IOCP-style cancel semantics).
    bool connect_cancel_pending = false;

    /// Event mask set during registration (no mutex needed).
    std::uint32_t registered_events = 0;

    /// File descriptor this state tracks.
    int fd = -1;

    /// Accumulated ready events (set by reactor, read by scheduler).
    std::atomic<std::uint32_t> ready_events_{0};

    /// True while this state is queued in the scheduler's completed_ops.
    std::atomic<bool> is_enqueued_{false};

    /// Owning scheduler for posting completions.
    reactor_scheduler_base const* scheduler_ = nullptr;

    /// Prevents impl destruction while queued in the scheduler.
    std::shared_ptr<void> impl_ref_;

    /// Add ready events atomically.
    /// Release pairs with the consumer's acquire exchange on
    /// ready_events_ so the consumer sees all flags. On x86 (TSO)
    /// this compiles to the same LOCK OR as relaxed.
    void add_ready_events(std::uint32_t ev) noexcept
    {
        ready_events_.fetch_or(ev, std::memory_order_release);
    }
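
    // Usage sketch (illustrative; the enqueue call on the scheduler is an
    // assumption, not the real corosio API): a reactor backend typically
    // records readiness and enqueues this state at most once, e.g.
    //
    //   void on_event(reactor_descriptor_state* st, std::uint32_t ev)
    //   {
    //       st->add_ready_events(ev);
    //       if (!st->is_enqueued_.exchange(true, std::memory_order_acq_rel))
    //           scheduler.post(st); // hypothetical: hand off to completed_ops
    //   }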

    /// Invoke deferred I/O and dispatch completions.
    void operator()() override
    {
        invoke_deferred_io();
    }

    /// Destroy without invoking.
    /// Called during scheduler::shutdown() drain. Clears impl_ref_ to break
    /// the self-referential cycle set by close_socket().
    void destroy() override
    {
        impl_ref_.reset();
    }

    /** Perform deferred I/O and queue completions.

        Performs the I/O under the mutex and queues completed ops. Ops
        that hit EAGAIN stay parked in their slot for re-delivery on the
        next edge event. (See the op-contract sketch after this struct.)
    */
    void invoke_deferred_io();
};
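
// Sketch of the op contract the code below assumes (illustrative; the real
// interface lives in reactor_op_base.hpp): perform_io() attempts the
// syscall and records errno in `errn`, so EAGAIN/EWOULDBLOCK leaves the op
// parked in its slot, e.g.
//
//   struct read_some_op : reactor_op_base   // hypothetical concrete op
//   {
//       void perform_io() override
//       {
//           ssize_t n = ::recv(fd_, buf_, len_, 0);
//           errn = (n < 0) ? errno : 0;   // EAGAIN => stays parked
//           if (n >= 0) bytes_transferred_ = static_cast<std::size_t>(n);
//       }
//   };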

inline void
reactor_descriptor_state::invoke_deferred_io()
{
    std::shared_ptr<void> prevent_impl_destruction;
    op_queue local_ops;

    {
        conditionally_enabled_mutex::scoped_lock lock(mutex);

        // Must clear is_enqueued_ and move impl_ref_ under the same
        // lock that processes I/O. close_socket() checks is_enqueued_
        // under this mutex; without atomicity between the flag store
        // and the ref move, close_socket() could see is_enqueued_==false,
        // skip setting impl_ref_, and destroy the impl under us. (A
        // sketch of that close path follows this function.)
        prevent_impl_destruction = std::move(impl_ref_);
        is_enqueued_.store(false, std::memory_order_release);

        std::uint32_t ev = ready_events_.exchange(0, std::memory_order_acquire);
        if (ev == 0)
        {
            // The mutex unlocks when we return; compensate for
            // work_cleanup's decrement.
            scheduler_->compensating_work_started();
            return;
        }

        int err = 0;
        if (ev & reactor_event_error)
        {
            socklen_t len = sizeof(err);
            if (::getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
                err = errno;
            if (err == 0)
                err = EIO;
        }

        if (ev & reactor_event_read)
        {
            if (read_op)
            {
                auto* rd = read_op;
                if (err)
                    rd->complete(err, 0);
                else
                    rd->perform_io();

                // Spurious or exhausted readiness: keep the op parked.
                if (rd->errn == EAGAIN || rd->errn == EWOULDBLOCK)
                {
                    rd->errn = 0;
                }
                else
                {
                    read_op = nullptr;
                    local_ops.push(rd);
                }
            }
            else
            {
                read_ready = true;
            }
        }
        if (ev & reactor_event_write)
        {
            bool had_write_op = (connect_op || write_op);
            if (connect_op)
            {
                auto* cn = connect_op;
                if (err)
                    cn->complete(err, 0);
                else
                    cn->perform_io();
                connect_op = nullptr;
                local_ops.push(cn);
            }
            if (write_op)
            {
                auto* wr = write_op;
                if (err)
                    wr->complete(err, 0);
                else
                    wr->perform_io();

                if (wr->errn == EAGAIN || wr->errn == EWOULDBLOCK)
                {
                    wr->errn = 0;
                }
                else
                {
                    write_op = nullptr;
                    local_ops.push(wr);
                }
            }
            if (!had_write_op)
                write_ready = true;
        }
        // An error event completes any ops still parked after the
        // read/write passes above.
        if (err)
        {
            if (read_op)
            {
                read_op->complete(err, 0);
                local_ops.push(std::exchange(read_op, nullptr));
            }
            if (write_op)
            {
                write_op->complete(err, 0);
                local_ops.push(std::exchange(write_op, nullptr));
            }
            if (connect_op)
            {
                connect_op->complete(err, 0);
                local_ops.push(std::exchange(connect_op, nullptr));
            }
        }
    }

    // Execute the first handler inline; the scheduler's work_cleanup
    // accounts for it as the "consumed" work item.
    scheduler_op* first = local_ops.pop();
    if (first)
    {
        scheduler_->post_deferred_completions(local_ops);
        (*first)();
    }
    else
    {
        scheduler_->compensating_work_started();
    }
}
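
// Illustrative counterpart (editor's sketch of the close path referenced
// above; the real close_socket() lives elsewhere in the backend). The key
// point is that it checks is_enqueued_ under the same mutex:
//
//   void close_socket(socket_impl& impl)          // hypothetical signature
//   {
//       conditionally_enabled_mutex::scoped_lock lock(impl.state->mutex);
//       if (impl.state->is_enqueued_.load(std::memory_order_acquire))
//           impl.state->impl_ref_ = impl.self();  // keep impl alive while
//                                                 // queued (self-reference
//                                                 // broken by destroy())
//       ...
//   }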

} // namespace boost::corosio::detail

#endif // BOOST_COROSIO_NATIVE_DETAIL_REACTOR_REACTOR_DESCRIPTOR_STATE_HPP