Zrythm v2.0.0-DEV
a highly automated and intuitive digital audio workstation
Loading...
Searching...
No Matches
ring_buffer.h
1// SPDX-FileCopyrightText: © 2024 Alexandros Theodotou <alex@zrythm.org>
2// SPDX-License-Identifier: LicenseRef-ZrythmLicense
3/*
4 * This file incorporates work covered by the following copyright and
5 * permission notice:
6 *
7 * Copyright 2011-2022 David Robillard <d@drobilla.net>
8 * SPDX-License-Identifier: ISC
9 */
10
11#ifndef __UTILS_RING_BUFFER_H__
12#define __UTILS_RING_BUFFER_H__
13
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <memory>
17
39template <typename T> class RingBuffer
40{
41public:
42 explicit RingBuffer (size_t size)
43 : size_ (size + 1), buf_ (std::make_unique<T[]> (size_)), read_head_ (0),
44 write_head_ (0)
45 {
46 }
47
48 RingBuffer (const RingBuffer &other)
49 : size_ (other.size_), buf_ (std::make_unique<T[]> (other.size_)),
50 read_head_ (other.read_head_.load ()),
51 write_head_ (other.write_head_.load ())
52 {
53 std::copy (other.buf_.get (), other.buf_.get () + size_, buf_.get ());
54 }
55
56#if 0
57 RingBuffer (RingBuffer &&other) noexcept
58 : size_ (other.size_), buf_ (std::move (other.buf_)),
59 read_head_ (other.read_head_.load ()),
60 write_head_ (other.write_head_.load ())
61 {
62 other.size_ = 0;
63 other.read_head_.store (0);
64 other.write_head_.store (0);
65 }
66#endif
67
68#if 0
69 RingBuffer &operator= (const RingBuffer &other)
70 {
71 if (this != &other)
72 {
73 size_ = other.size_;
74 buf_ = std::make_unique<T[]> (other.size_);
75 std::copy (other.buf_.get (), other.buf_.get () + size_, buf_.get ());
76 read_head_.store (other.read_head_.load ());
77 write_head_.store (other.write_head_.load ());
78 }
79 return *this;
80 }
81
82 RingBuffer &operator= (RingBuffer &&other) noexcept
83 {
84 if (this != &other)
85 {
86 size_ = other.size_;
87 buf_ = std::move (other.buf_);
88 read_head_.store (other.read_head_.load ());
89 write_head_.store (other.write_head_.load ());
90 other.size_ = 0;
91 other.read_head_.store (0);
92 other.write_head_.store (0);
93 }
94 return *this;
95 }
96#endif
97
98 bool write (const T &src)
99 {
100 const size_t write_pos = write_head_.load (std::memory_order_relaxed);
101 const size_t next_pos = increment_pos (write_pos);
102 if (next_pos == read_head_.load (std::memory_order_acquire))
103 {
104 return false;
105 }
106 buf_[write_pos] = src;
107 write_head_.store (next_pos, std::memory_order_release);
108 return true;
109 }
110
111 void force_write (const T &src)
112 {
113 const size_t write_pos = write_head_.load (std::memory_order_relaxed);
114 const size_t next_pos = increment_pos (write_pos);
115
116 if (next_pos == read_head_.load (std::memory_order_acquire))
117 {
118 // Buffer is full, drop the element at the read head
119 read_head_.store (
120 increment_pos (read_head_.load (std::memory_order_relaxed)),
121 std::memory_order_release);
122 }
123
124 buf_[write_pos] = src;
125 write_head_.store (next_pos, std::memory_order_release);
126 }
127
128 bool write_multiple (const T * src, size_t count)
129 {
130 const size_t write_pos = write_head_.load (std::memory_order_relaxed);
131 const size_t read_pos = read_head_.load (std::memory_order_acquire);
132 const size_t available_space = (read_pos - write_pos - 1 + size_) % size_;
133
134 if (count > available_space)
135 {
136 return false;
137 }
138
139 for (size_t i = 0; i < count; ++i)
140 {
141 buf_[(write_pos + i) % size_] = src[i];
142 }
143
144 write_head_.store ((write_pos + count) % size_, std::memory_order_release);
145 return true;
146 }
147
148 void force_write_multiple (const T * src, size_t count)
149 {
150 size_t write_pos = write_head_.load (std::memory_order_relaxed);
151 size_t read_pos = read_head_.load (std::memory_order_acquire);
152
153 for (size_t i = 0; i < count; ++i)
154 {
155 buf_[write_pos] = src[i];
156 write_pos = increment_pos (write_pos);
157
158 if (write_pos == read_pos)
159 {
160 read_pos = increment_pos (read_pos);
161 }
162 }
163
164 write_head_.store (write_pos, std::memory_order_release);
165 read_head_.store (read_pos, std::memory_order_release);
166 }
167
168 bool skip (size_t num_elements)
169 {
170 const size_t read_pos = read_head_.load (std::memory_order_relaxed);
171 const size_t write_pos = write_head_.load (std::memory_order_acquire);
172 const size_t available_elements = (write_pos - read_pos) % size_;
173
174 if (num_elements > available_elements)
175 {
176 return false;
177 }
178
179 read_head_.store (
180 (read_pos + num_elements) % size_, std::memory_order_release);
181 return true;
182 }
183
184 bool read (T &dst)
185 {
186 const size_t read_pos = read_head_.load (std::memory_order_relaxed);
187 if (read_pos == write_head_.load (std::memory_order_acquire))
188 {
189 return false;
190 }
191 dst = buf_[read_pos];
192 read_head_.store (increment_pos (read_pos), std::memory_order_release);
193 return true;
194 }
195
208 bool peek (T &dst) const
209 {
210 const size_t read_pos = read_head_.load (std::memory_order_acquire);
211 const size_t write_pos = write_head_.load (std::memory_order_acquire);
212
213 if (read_pos == write_pos)
214 {
215 return false; // Buffer is empty
216 }
217
218 dst = buf_[read_pos];
219 return true;
220 }
221
236 size_t peek_multiple (T * dst, size_t count) const
237 {
238 const size_t read_pos = read_head_.load (std::memory_order_acquire);
239 const size_t write_pos = write_head_.load (std::memory_order_acquire);
240 const size_t available = (write_pos - read_pos + size_) % size_;
241
242 size_t peeked = std::min (count, available);
243
244 for (size_t i = 0; i < peeked; ++i)
245 {
246 dst[i] = buf_[(read_pos + i) % size_];
247 }
248
249 return peeked;
250 }
251
252 void reset ()
253 {
254 read_head_.store (0, std::memory_order_relaxed);
255 write_head_.store (0, std::memory_order_relaxed);
256 }
257
258 size_t capacity () const { return size_ - 1; }
259
260 size_t write_space () const
261 {
262 size_t read_pos = read_head_.load (std::memory_order_relaxed);
263 size_t write_pos = write_head_.load (std::memory_order_relaxed);
264 if (write_pos >= read_pos)
265 {
266 return (size_ - write_pos) + read_pos - 1;
267 }
268 else
269 {
270 return read_pos - write_pos - 1;
271 }
272 }
273
274 size_t read_space () const
275 {
276 size_t read_pos = read_head_.load (std::memory_order_relaxed);
277 size_t write_pos = write_head_.load (std::memory_order_relaxed);
278 if (write_pos >= read_pos)
279 {
280 return write_pos - read_pos;
281 }
282 else
283 {
284 return (size_ - read_pos) + write_pos;
285 }
286 }
287
288 bool can_read_multiple (size_t count) const { return count <= read_space (); }
289
290 bool read_multiple (T * dst, size_t count)
291 {
292 const size_t read_pos = read_head_.load (std::memory_order_relaxed);
293 const size_t write_pos = write_head_.load (std::memory_order_acquire);
294 const size_t available = (write_pos - read_pos + size_) % size_;
295
296 if (count > available)
297 {
298 return false;
299 }
300
301 for (size_t i = 0; i < count; ++i)
302 {
303 dst[i] = buf_[(read_pos + i) % size_];
304 }
305
306 read_head_.store ((read_pos + count) % size_, std::memory_order_release);
307 return true;
308 }
309
310private:
311 size_t increment_pos (size_t pos) const { return (pos + 1) % size_; }
312
313 const size_t size_;
314 std::unique_ptr<T[]> buf_;
315 std::atomic<size_t> read_head_;
316 std::atomic<size_t> write_head_;
317};
318
319#endif // __UTILS_RING_BUFFER_H__
bool peek(T &dst) const
Peek a single element from the ring buffer without moving the read head.
size_t peek_multiple(T *dst, size_t count) const
Peek multiple elements from the ring buffer without moving the read head.