/**
 * Lock-free ring buffer for a single-producer / single-consumer (SPSC)
 * scenario.
 *
 * One extra slot is allocated internally so a full buffer can be told apart
 * from an empty one (full == write head one step behind the read head), i.e.
 * an internal store of `size + 1` slots yields a usable capacity of `size`.
 *
 * Thread-safety holds only for exactly one concurrent reader thread and one
 * concurrent writer thread. NOTE(review): force_write()/force_write_multiple()
 * advance the read head from the writer side, which races with a concurrent
 * reader — only use them when the reader is known to be idle (or data loss /
 * a stale element is acceptable). The copy/move operations and clear() are
 * not thread-safe at all and must be externally synchronized.
 */
template <typename T> class RingBuffer
{
public:
  /**
   * Creates a ring buffer able to hold @p size elements.
   *
   * Allocates size + 1 internal slots (see class comment).
   */
  explicit RingBuffer (size_t size)
      : size_ (size + 1), buf_ (std::make_unique<T[]> (size_)), read_head_ (0),
        write_head_ (0)
  {
  }

  /** Deep-copies the storage and snapshots both head positions. */
  RingBuffer (const RingBuffer &other)
      : size_ (other.size_), buf_ (std::make_unique<T[]> (other.size_)),
        read_head_ (other.read_head_.load ()),
        write_head_ (other.write_head_.load ())
  {
    std::copy (other.buf_.get (), other.buf_.get () + size_, buf_.get ());
  }

  /** Steals the storage and leaves the source empty. */
  RingBuffer (RingBuffer &&other) noexcept
      : size_ (other.size_), buf_ (std::move (other.buf_)),
        read_head_ (other.read_head_.load ()),
        write_head_ (other.write_head_.load ())
  {
    other.read_head_.store (0);
    other.write_head_.store (0);
  }

  /**
   * Deep-copy assignment.
   *
   * Fixed: adopts other's size before copying (previously the element copy
   * used the stale size_) and guards against self-assignment, which would
   * otherwise copy from a freshly reallocated (uninitialized) buffer.
   */
  RingBuffer &operator= (const RingBuffer &other)
  {
    if (this != &other)
      {
        size_ = other.size_;
        buf_ = std::make_unique<T[]> (other.size_);
        std::copy (other.buf_.get (), other.buf_.get () + size_, buf_.get ());
        read_head_.store (other.read_head_.load ());
        write_head_.store (other.write_head_.load ());
      }
    return *this;
  }

  /**
   * Move assignment; the source is left empty.
   *
   * Fixed: adopts other's size (previously the old size_ survived, making
   * every position computation wrong for the stolen buffer) and guards
   * against self-move.
   */
  RingBuffer &operator= (RingBuffer &&other) noexcept
  {
    if (this != &other)
      {
        size_ = other.size_;
        buf_ = std::move (other.buf_);
        read_head_.store (other.read_head_.load ());
        write_head_.store (other.write_head_.load ());
        other.read_head_.store (0);
        other.write_head_.store (0);
      }
    return *this;
  }

  /**
   * Writes one element.
   *
   * @return false (without writing) if the buffer is full.
   */
  bool write (const T &src)
  {
    const size_t write_pos = write_head_.load (std::memory_order_relaxed);
    const size_t next_pos = increment_pos (write_pos);
    if (next_pos == read_head_.load (std::memory_order_acquire))
      {
        return false; // full
      }
    buf_[write_pos] = src;
    write_head_.store (next_pos, std::memory_order_release);
    return true;
  }

  /**
   * Writes one element, overwriting the oldest element if the buffer is
   * full (the read head is pushed forward, dropping one element).
   *
   * See the class comment: not safe against a concurrent reader.
   */
  void force_write (const T &src)
  {
    const size_t write_pos = write_head_.load (std::memory_order_relaxed);
    const size_t next_pos = increment_pos (write_pos);
    if (next_pos == read_head_.load (std::memory_order_acquire))
      {
        // Full: drop the oldest element to make room.
        read_head_.store (
          increment_pos (read_head_.load (std::memory_order_relaxed)),
          std::memory_order_release);
      }
    buf_[write_pos] = src;
    write_head_.store (next_pos, std::memory_order_release);
  }

  /**
   * Writes @p count elements from @p src atomically (all or nothing).
   *
   * @return false (without writing) if there is not enough free space.
   */
  bool write_multiple (const T * src, size_t count)
  {
    const size_t write_pos = write_head_.load (std::memory_order_relaxed);
    const size_t read_pos = read_head_.load (std::memory_order_acquire);
    // + size_ keeps the unsigned arithmetic in range before the modulo.
    const size_t available_space = (read_pos - write_pos - 1 + size_) % size_;
    if (count > available_space)
      {
        return false;
      }
    for (size_t i = 0; i < count; ++i)
      {
        buf_[(write_pos + i) % size_] = src[i];
      }
    write_head_.store ((write_pos + count) % size_, std::memory_order_release);
    return true;
  }

  /**
   * Writes @p count elements from @p src, overwriting the oldest elements
   * when the buffer is full. If count exceeds the capacity, only the last
   * capacity() elements survive.
   *
   * See the class comment: not safe against a concurrent reader.
   */
  void force_write_multiple (const T * src, size_t count)
  {
    size_t write_pos = write_head_.load (std::memory_order_relaxed);
    size_t read_pos = read_head_.load (std::memory_order_acquire);
    for (size_t i = 0; i < count; ++i)
      {
        buf_[write_pos] = src[i];
        write_pos = increment_pos (write_pos);
        if (write_pos == read_pos)
          {
            // Caught up with the reader: drop the oldest element.
            read_pos = increment_pos (read_pos);
          }
      }
    write_head_.store (write_pos, std::memory_order_release);
    read_head_.store (read_pos, std::memory_order_release);
  }

  /**
   * Discards @p num_elements without reading them (all or nothing).
   *
   * @return false (without skipping) if fewer elements are available.
   */
  bool skip (size_t num_elements)
  {
    const size_t read_pos = read_head_.load (std::memory_order_relaxed);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);
    // Fixed: add size_ before the modulo. (write_pos - read_pos) underflows
    // when the write head has wrapped behind the read head, and the plain
    // "% size_" of the wrapped value is only correct for power-of-two sizes
    // (size_ is size + 1, i.e. arbitrary), so skip() could previously report
    // too many available elements and jump the read head past valid data.
    const size_t available_elements = (write_pos - read_pos + size_) % size_;
    if (num_elements > available_elements)
      {
        return false;
      }
    read_head_.store (
      (read_pos + num_elements) % size_, std::memory_order_release);
    return true;
  }

  /**
   * Reads one element into @p dst and consumes it.
   *
   * @return false (leaving @p dst untouched) if the buffer is empty.
   */
  bool read (T &dst)
  {
    const size_t read_pos = read_head_.load (std::memory_order_relaxed);
    if (read_pos == write_head_.load (std::memory_order_acquire))
      {
        return false; // empty
      }
    dst = buf_[read_pos];
    read_head_.store (increment_pos (read_pos), std::memory_order_release);
    return true;
  }

  /**
   * Copies the oldest element into @p dst without consuming it.
   *
   * @return false (leaving @p dst untouched) if the buffer is empty.
   */
  bool peek (T &dst) const
  {
    const size_t read_pos = read_head_.load (std::memory_order_acquire);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);
    if (read_pos == write_pos)
      {
        return false;
      }
    dst = buf_[read_pos];
    return true;
  }

  /**
   * Copies up to @p count of the oldest elements into @p dst without
   * consuming them.
   *
   * @return the number of elements actually copied (may be less than count).
   */
  size_t peek_multiple (T * dst, size_t count) const
  {
    const size_t read_pos = read_head_.load (std::memory_order_acquire);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);
    const size_t available = (write_pos - read_pos + size_) % size_;
    const size_t peeked = std::min (count, available);
    for (size_t i = 0; i < peeked; ++i)
      {
        dst[i] = buf_[(read_pos + i) % size_];
      }
    return peeked;
  }

  /** Empties the buffer. Not safe against concurrent readers/writers. */
  void clear ()
  {
    read_head_.store (0, std::memory_order_relaxed);
    write_head_.store (0, std::memory_order_relaxed);
  }

  /** Number of elements the buffer can hold (one less than the store). */
  size_t capacity () const { return size_ - 1; }

  /** Free slots currently available to the writer (approximate if racing). */
  size_t write_space () const
  {
    size_t read_pos = read_head_.load (std::memory_order_relaxed);
    size_t write_pos = write_head_.load (std::memory_order_relaxed);
    if (write_pos >= read_pos)
      {
        return (size_ - write_pos) + read_pos - 1;
      }
    return read_pos - write_pos - 1;
  }

  /** Elements currently available to the reader (approximate if racing). */
  size_t read_space () const
  {
    size_t read_pos = read_head_.load (std::memory_order_relaxed);
    size_t write_pos = write_head_.load (std::memory_order_relaxed);
    if (write_pos >= read_pos)
      {
        return write_pos - read_pos;
      }
    return (size_ - read_pos) + write_pos;
  }

  /** Whether @p count elements can currently be read. */
  bool can_read_multiple (size_t count) const
  {
    return count <= read_space ();
  }

  /**
   * Reads @p count elements into @p dst atomically (all or nothing).
   *
   * @return false (without reading) if fewer elements are available.
   */
  bool read_multiple (T * dst, size_t count)
  {
    const size_t read_pos = read_head_.load (std::memory_order_relaxed);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);
    const size_t available = (write_pos - read_pos + size_) % size_;
    if (count > available)
      {
        return false;
      }
    for (size_t i = 0; i < count; ++i)
      {
        dst[i] = buf_[(read_pos + i) % size_];
      }
    read_head_.store ((read_pos + count) % size_, std::memory_order_release);
    return true;
  }

private:
  /** Advances a position by one slot, wrapping at the store boundary. */
  size_t increment_pos (size_t pos) const { return (pos + 1) % size_; }

  size_t                size_ = 0; // internal store size (capacity + 1)
  std::unique_ptr<T[]>  buf_;
  std::atomic<size_t>   read_head_;
  std::atomic<size_t>   write_head_;
};