Zrythm v2.0.0-DEV
A highly automated and intuitive digital audio workstation
Loading...
Searching...
No Matches
ring_buffer.h
1// SPDX-FileCopyrightText: © 2024 Alexandros Theodotou <alex@zrythm.org>
2// SPDX-License-Identifier: LicenseRef-ZrythmLicense
3/*
4 * This file incorporates work covered by the following copyright and
5 * permission notice:
6 *
7 * Copyright 2011-2022 David Robillard <d@drobilla.net>
8 * SPDX-License-Identifier: ISC
9 */
10
11#pragma once
12
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <memory>
16
/**
 * A lock-free single-producer / single-consumer (SPSC) ring buffer.
 *
 * One extra slot is allocated internally so that a full buffer
 * (write head one slot behind the read head) can be distinguished
 * from an empty one (heads equal); a buffer constructed with
 * @p size therefore holds exactly @p size elements.
 *
 * Thread-safety: safe for exactly one writer thread and one reader
 * thread operating concurrently. Note that force_write() and
 * force_write_multiple() advance the read head from the writer side
 * and must not race with a concurrent reader; reset() and the copy
 * constructor are likewise not safe against concurrent access.
 */
template <typename T> class RingBuffer
{
public:
  /**
   * Creates a ring buffer that can hold @p size elements.
   *
   * @note size_ is declared before buf_, so it is initialized first and
   * may safely be used in buf_'s initializer (the +1 is the sentinel
   * slot described on the class).
   */
  explicit RingBuffer (size_t size)
      : size_ (size + 1), buf_ (std::make_unique<T[]> (size_)), read_head_ (0),
        write_head_ (0)
  {
  }

  /**
   * Copies another ring buffer's contents and head positions.
   *
   * Not atomic: @p other must not be mutated concurrently.
   */
  RingBuffer (const RingBuffer &other)
      : size_ (other.size_), buf_ (std::make_unique<T[]> (other.size_)),
        read_head_ (other.read_head_.load ()),
        write_head_ (other.write_head_.load ())
  {
    std::copy (other.buf_.get (), other.buf_.get () + size_, buf_.get ());
  }

  // Assignment is intentionally unavailable (size_ is const and the
  // heads are atomics, so the implicit versions are deleted anyway);
  // rvalue construction falls back to the copy constructor.
  RingBuffer &operator= (const RingBuffer &) = delete;
  RingBuffer &operator= (RingBuffer &&) = delete;

  /**
   * Writes a single element.
   *
   * @return false (leaving the buffer untouched) if the buffer is full.
   */
  bool write (const T &src)
  {
    const size_t write_pos = write_head_.load (std::memory_order_relaxed);
    const size_t next_pos = increment_pos (write_pos);
    // Full when advancing the write head would collide with the read
    // head; acquire pairs with the reader's release store.
    if (next_pos == read_head_.load (std::memory_order_acquire))
      {
        return false;
      }
    buf_[write_pos] = src;
    // Release publishes the stored element to the reader.
    write_head_.store (next_pos, std::memory_order_release);
    return true;
  }

  /**
   * Writes a single element, dropping the oldest element if full.
   *
   * @warning Advances the read head from the writer side when full, so
   * it must not race with a concurrent reader.
   */
  void force_write (const T &src)
  {
    const size_t write_pos = write_head_.load (std::memory_order_relaxed);
    const size_t next_pos = increment_pos (write_pos);

    if (next_pos == read_head_.load (std::memory_order_acquire))
      {
        // Buffer is full, drop the element at the read head.
        read_head_.store (
          increment_pos (read_head_.load (std::memory_order_relaxed)),
          std::memory_order_release);
      }

    buf_[write_pos] = src;
    write_head_.store (next_pos, std::memory_order_release);
  }

  /**
   * Writes @p count elements from @p src, all or nothing.
   *
   * @return false (writing nothing) if there is not enough free space.
   */
  bool write_multiple (const T * src, size_t count)
  {
    const size_t write_pos = write_head_.load (std::memory_order_relaxed);
    const size_t read_pos = read_head_.load (std::memory_order_acquire);
    // Free slots, minus the sentinel slot; +size_ keeps the unsigned
    // arithmetic non-wrapping before the modulo.
    const size_t available_space = (read_pos - write_pos - 1 + size_) % size_;

    if (count > available_space)
      {
        return false;
      }

    for (size_t i = 0; i < count; ++i)
      {
        buf_[(write_pos + i) % size_] = src[i];
      }

    write_head_.store ((write_pos + count) % size_, std::memory_order_release);
    return true;
  }

  /**
   * Writes @p count elements from @p src, dropping the oldest elements
   * as needed to make room.
   *
   * @warning Advances the read head from the writer side; see
   * force_write() for the reader-race caveat.
   */
  void force_write_multiple (const T * src, size_t count)
  {
    size_t write_pos = write_head_.load (std::memory_order_relaxed);
    size_t read_pos = read_head_.load (std::memory_order_acquire);

    for (size_t i = 0; i < count; ++i)
      {
        buf_[write_pos] = src[i];
        write_pos = increment_pos (write_pos);

        if (write_pos == read_pos)
          {
            // Write head caught up: drop the oldest element.
            read_pos = increment_pos (read_pos);
          }
      }

    write_head_.store (write_pos, std::memory_order_release);
    read_head_.store (read_pos, std::memory_order_release);
  }

  /**
   * Advances the read head past @p num_elements without reading them.
   *
   * @return false (moving nothing) if fewer elements are available.
   */
  bool skip (size_t num_elements)
  {
    const size_t read_pos = read_head_.load (std::memory_order_relaxed);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);
    // Add size_ before the modulo: a bare (write_pos - read_pos) wraps
    // modulo 2^64 when the write head is behind the read head, and the
    // wrapped value mod size_ is wrong unless size_ is a power of two.
    const size_t available_elements = (write_pos - read_pos + size_) % size_;

    if (num_elements > available_elements)
      {
        return false;
      }

    read_head_.store (
      (read_pos + num_elements) % size_, std::memory_order_release);
    return true;
  }

  /**
   * Reads a single element into @p dst.
   *
   * @return false if the buffer is empty.
   */
  bool read (T &dst)
  {
    const size_t read_pos = read_head_.load (std::memory_order_relaxed);
    // Empty when the heads coincide; acquire pairs with the writer's
    // release store.
    if (read_pos == write_head_.load (std::memory_order_acquire))
      {
        return false;
      }
    dst = buf_[read_pos];
    read_head_.store (increment_pos (read_pos), std::memory_order_release);
    return true;
  }

  /**
   * Peeks a single element into @p dst without moving the read head.
   *
   * @return false if the buffer is empty.
   */
  bool peek (T &dst) const
  {
    const size_t read_pos = read_head_.load (std::memory_order_acquire);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);

    if (read_pos == write_pos)
      {
        return false; // Buffer is empty
      }

    dst = buf_[read_pos];
    return true;
  }

  /**
   * Peeks up to @p count elements into @p dst without moving the read
   * head.
   *
   * @return The number of elements actually copied.
   */
  size_t peek_multiple (T * dst, size_t count) const
  {
    const size_t read_pos = read_head_.load (std::memory_order_acquire);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);
    const size_t available = (write_pos - read_pos + size_) % size_;

    size_t peeked = std::min (count, available);

    for (size_t i = 0; i < peeked; ++i)
      {
        dst[i] = buf_[(read_pos + i) % size_];
      }

    return peeked;
  }

  /**
   * Empties the buffer by rewinding both heads.
   *
   * @warning Not safe against concurrent readers/writers.
   */
  void reset ()
  {
    read_head_.store (0, std::memory_order_relaxed);
    write_head_.store (0, std::memory_order_relaxed);
  }

  /** Maximum number of elements the buffer can hold. */
  size_t capacity () const { return size_ - 1; }

  /**
   * Number of elements that can currently be written.
   *
   * Relaxed loads: the result is only a snapshot for the calling thread.
   */
  size_t write_space () const
  {
    size_t read_pos = read_head_.load (std::memory_order_relaxed);
    size_t write_pos = write_head_.load (std::memory_order_relaxed);
    if (write_pos >= read_pos)
      {
        return (size_ - write_pos) + read_pos - 1;
      }
    else
      {
        return read_pos - write_pos - 1;
      }
  }

  /**
   * Number of elements that can currently be read.
   *
   * Relaxed loads: the result is only a snapshot for the calling thread.
   */
  size_t read_space () const
  {
    size_t read_pos = read_head_.load (std::memory_order_relaxed);
    size_t write_pos = write_head_.load (std::memory_order_relaxed);
    if (write_pos >= read_pos)
      {
        return write_pos - read_pos;
      }
    else
      {
        return (size_ - read_pos) + write_pos;
      }
  }

  /** Whether @p count elements can currently be read. */
  bool can_read_multiple (size_t count) const { return count <= read_space (); }

  /**
   * Reads @p count elements into @p dst, all or nothing.
   *
   * @return false (reading nothing) if fewer elements are available.
   */
  bool read_multiple (T * dst, size_t count)
  {
    const size_t read_pos = read_head_.load (std::memory_order_relaxed);
    const size_t write_pos = write_head_.load (std::memory_order_acquire);
    const size_t available = (write_pos - read_pos + size_) % size_;

    if (count > available)
      {
        return false;
      }

    for (size_t i = 0; i < count; ++i)
      {
        dst[i] = buf_[(read_pos + i) % size_];
      }

    read_head_.store ((read_pos + count) % size_, std::memory_order_release);
    return true;
  }

private:
  /** Advances a head position by one slot, wrapping at size_. */
  size_t increment_pos (size_t pos) const { return (pos + 1) % size_; }

  /** Allocated slot count (requested capacity + 1 sentinel slot). */
  const size_t size_;
  std::unique_ptr<T[]> buf_;
  std::atomic<size_t> read_head_;
  std::atomic<size_t> write_head_;
};
bool peek(T &dst) const
Peek a single element from the ring buffer without moving the read head.
size_t peek_multiple(T *dst, size_t count) const
Peek multiple elements from the ring buffer without moving the read head.