// ScummVM API documentation: ringbuffer.h
/* ScummVM - Graphic Adventure Engine
 *
 * ScummVM is the legal property of its developers, whose names
 * are too numerous to list here. Please refer to the COPYRIGHT
 * file distributed with this source distribution.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
21 
22 #ifndef _BACKENDS_MIXER_ANDROID_RINGBUFFER_H_
23 #define _BACKENDS_MIXER_ANDROID_RINGBUFFER_H_
24 
25 #include <assert.h>
26 #include <stddef.h>
27 #include <string.h>
28 #include <new>
29 #include <atomic>
30 
34 template<typename T>
35 class RingBuffer {
36 public:
37  RingBuffer(size_t n) : _size(n + 1), _buffer(new T[_size]), _pending_read(0), _pending_write(0), _read(0), _write(0), _last(0) { }
38  RingBuffer(const RingBuffer<T> &) = delete;
39 
40  /*
41  * Contruct a new RingBuffer moving data from the old one
42  * The other RingBuffer must not be in use and destroyed afterwards.
43  *
44  * When the new RingBuffer is smaller than the previous one, the older samples are dropped.
45  */
46  RingBuffer(size_t n, RingBuffer<T> &&o) : RingBuffer(n) {
47  // First make the RingBuffer was in a valid state and make it invalid
48  const size_t write = o._write.exchange(-1);
49  const size_t read = o._read.exchange(-1);
50  assert(o._pending_write == write);
51  o._pending_write = 0;
52  assert(o._pending_read == read);
53  o._pending_read = -1;
54 
55  T const *buffer = o._buffer;
56  o._buffer = nullptr;
57 
58  // From here, o is completely invalid
59 
60  if (read == write) {
61  // Empty queue: nothing to move
62  delete[] buffer;
63  return;
64  }
65  if (read < write) {
66  // Cap the kept data to our own buffer size
67  size_t nread = read;
68  if (nread + n < write) {
69  nread = write - n;
70  }
71  _pending_write = write - nread;
72 
73  memcpy(&_buffer[0], &buffer[nread], _pending_write * sizeof(T));
74 
75  _last.store(_pending_write, std::memory_order_relaxed);
76  _write.store(_pending_write, std::memory_order_release);
77  delete[] buffer;
78  return;
79  }
80 
81  // read > write: the buffer is in two parts
82  if (n <= write) {
83  // Easy: we can take the last n samples in one shot
84  _pending_write = n;
85  memcpy(&_buffer[0], &buffer[write - n], n * sizeof(T));
86 
87  _last.store(_pending_write, std::memory_order_relaxed);
88  _write.store(_pending_write, std::memory_order_release);
89 
90  delete[] buffer;
91  return;
92  }
93 
94  // n > write
95  size_t last = o._last.load(std::memory_order_relaxed);
96  size_t end_part_sz = last - read;
97  if (end_part_sz > (n - write)) {
98  end_part_sz = n - write;
99  }
100 
101  // First, copy the end of the buffer up to last, then copy the whole beginning
102  _pending_write = end_part_sz + write;
103  memcpy(&_buffer[0], &buffer[last - end_part_sz], end_part_sz * sizeof(T));
104  memcpy(&_buffer[end_part_sz], &buffer[0], write * sizeof(T));
105 
106  _last.store(_pending_write, std::memory_order_relaxed);
107  _write.store(_pending_write, std::memory_order_release);
108 
109  delete[] buffer;
110  }
111 
112  ~RingBuffer() { delete[] _buffer; }
113 
114  /*
115  * Try to produce at least n elements.
116  * The ring-buffer will adjust n with the real element count
117  * which should be produced.
118  * In case of failure, nullptr is returned.
119  *
120  * When successful, n is guaranteed to be at least what has been queried.
121  * A pointer to the buffer to fill is returned.
122  */
123  T *try_produce(size_t *n) {
124  size_t real_n = *n;
125  assert(real_n > 0);
126 
127  size_t write = _write.load(std::memory_order_relaxed);
128  size_t read = _read.load(std::memory_order_acquire);
129  assert(_pending_write == write);
130 
131  // Try to acquire at at least real_n records
132  if (read <= write) {
133  if (write + real_n <= _size) {
134  real_n = _size - write;
135  *n = real_n;
136  _wraparound_write = false;
137  _pending_write = write + real_n;
138  return &_buffer[write];
139  } else if (real_n < read) { // Don't go up to read: that would make believe it's empty
140  real_n = read - 1;
141  *n = real_n;
142  _wraparound_write = true;
143  _pending_write = real_n;
144  return &_buffer[0];
145  } else {
146  return nullptr;
147  }
148  } else {
149  if (write + real_n < read) { // Don't go up to read: that would make believe it's empty
150  real_n = read - write - 1;
151  *n = real_n;
152  _wraparound_write = false;
153  _pending_write = write + real_n;
154  return &_buffer[write];
155  } else {
156  return nullptr;
157  }
158  }
159  }
160 
161  /*
162  * Indicate that n samples have been produced.
163  * n must be less than or equal to what have been returned by try_produce.
164  */
165  void produced(size_t n) {
166  size_t write = _write.load(std::memory_order_relaxed);
167  size_t pending_write;
168  if (_wraparound_write) {
169  pending_write = n;
170  _last.store(write, std::memory_order_relaxed);
171  } else {
172  pending_write = write + n;
173  }
174  // Make sure we didn't overshoot
175  assert(_pending_write >= pending_write);
176  if (pending_write > _last.load(std::memory_order_relaxed)) {
177  _last.store(pending_write, std::memory_order_relaxed);
178  }
179  _pending_write = pending_write;
180  _write.store(pending_write, std::memory_order_release);
181  }
182 
183  /*
184  * Try to consume at most n elements.
185  * If there is less than n elements (or if the buffer is not contiguous), adjusts n to the real count.
186  * If there is no element available, returns nullptr.
187  *
188  * Loop over try_consume until it returns nullptr to fetch all the expected elements.
189  */
190  T *try_consume(size_t *n) {
191  size_t real_n = *n;
192  assert(real_n > 0);
193 
194  size_t read = _read.load(std::memory_order_relaxed);
195  assert(_pending_read == read);
196 
197  // Try to acquire at most n records
198  size_t write = _write.load(std::memory_order_acquire);
199 
200  if (read == write) {
201  // Empty queue: nothing to return
202  return nullptr;
203  } else if (read < write) {
204  if (read + real_n > write) {
205  real_n = write - read;
206  }
207  *n = real_n;
208  _pending_read = read + real_n;
209  return &_buffer[read];
210  } else {
211  size_t last = _last.load(std::memory_order_relaxed);
212  if (read == last) { // This happens when we read up to the end the last time: consider read as 0
213  if (0 == write) {
214  // Empty queue: nothing to return
215  return nullptr;
216  }
217  if (real_n > write) {
218  real_n = write;
219  }
220  *n = real_n;
221  _pending_read = real_n;
222  return &_buffer[0];
223  } else if (read + real_n < last) {
224  *n = real_n;
225  _pending_read = read + real_n;
226  return &_buffer[read];
227  } else {
228  *n = last - read;
229  _pending_read = 0;
230  return &_buffer[read];
231  }
232  }
233  }
234 
235  /*
236  * Indicate that the previous consume request has been done.
237  *
238  * Frees the buffer for more produced samples.
239  */
240  void consumed() {
241  _read.store(_pending_read, std::memory_order_release);
242  }
243 
244 private:
245  const size_t _size;
246  T *_buffer;
247 
248  size_t _pending_read;
249  size_t _pending_write;
250  bool _wraparound_write;
251 
252  // LLVM between 8 and 10 wrongfully said they supported this
253 #if 0 && __cpp_lib_hardware_interference_size
254  static constexpr size_t hardware_destructive_interference_size = std::hardware_destructive_interference_size;
255 #else
256  // 64 is a good fit for most platforms
257  static constexpr size_t hardware_destructive_interference_size = 64;
258 #endif
259 
260  alignas(hardware_destructive_interference_size) std::atomic<size_t> _read;
261  alignas(hardware_destructive_interference_size) std::atomic<size_t> _write;
262  alignas(hardware_destructive_interference_size) std::atomic<size_t> _last;
263 };
264 
265 #endif
// Definition: ringbuffer.h:35