// NOTE(review): this chunk is a garbled extraction — the leading "NN " tokens
// are original-file line numbers fused into the text, and original lines 24-36
// (presumably the template<typename T> class RingBuffer declaration) are
// missing from this view.

// Constructor: allocates n + 1 slots — the extra slot lets an empty buffer
// (_read == _write) be distinguished from a full one. All cursors start at 0.
// NOTE(review): _buffer(new T[_size]) relies on _size being declared before
// _buffer so it is initialized first — confirm member order in the class body.
22 #ifndef _BACKENDS_MIXER_ANDROID_RINGBUFFER_H_ 23 #define _BACKENDS_MIXER_ANDROID_RINGBUFFER_H_ 37 RingBuffer(
size_t n) : _size(n + 1), _buffer(
new T[_size]), _pending_read(0), _pending_write(0), _read(0), _write(0), _last(0) { }
// Fragment of a constructor that adopts and compacts another buffer `o` (its
// signature line is outside this view; interior lines are missing — note the
// gaps in the fused "NN " numbers). It poisons o's cursors with -1 via
// exchange so any concurrent producer/consumer on `o` would trip the asserts,
// then copies o's live data to the front of the new _buffer, un-wrapping it
// if necessary.
// NOTE(review): the memcpy calls are only valid for trivially copyable T —
// confirm the class documents that constraint.
48 const size_t write = o._write.exchange(-1);
49 const size_t read = o._read.exchange(-1);
// No produce/consume may be in flight on `o` while it is being adopted.
50 assert(o._pending_write == write);
52 assert(o._pending_read == read);
55 T
const *buffer = o._buffer;
// Case: o's live region is contiguous (no wraparound) — copy it to index 0.
// NOTE(review): `nread` and `n` are defined on lines missing from this view —
// presumably the read position and the new capacity; confirm in the file.
68 if (nread + n < write) {
71 _pending_write = write - nread;
73 memcpy(&_buffer[0], &buffer[nread], _pending_write *
sizeof(T));
// _last first (relaxed), then _write (release) publishes the copied data.
75 _last.store(_pending_write, std::memory_order_relaxed);
76 _write.store(_pending_write, std::memory_order_release);
// Case: region is contiguous but larger than n — keep only the newest n
// elements. NOTE(review): the assignment of _pending_write for this branch
// is on a line missing from this view.
85 memcpy(&_buffer[0], &buffer[write - n], n *
sizeof(T));
87 _last.store(_pending_write, std::memory_order_relaxed);
88 _write.store(_pending_write, std::memory_order_release);
// Case: o's data wrapped — _last marks where valid data ended before the wrap,
// so the tail segment is [?, last) and the head segment is [0, write).
95 size_t last = o._last.load(std::memory_order_relaxed);
96 size_t end_part_sz = last - read;
// Clamp the tail segment so tail + head fit within the requested capacity n.
97 if (end_part_sz > (n - write)) {
98 end_part_sz = n - write;
// Un-wrap: copy the tail segment first, then the head segment after it.
102 _pending_write = end_part_sz + write;
103 memcpy(&_buffer[0], &buffer[last - end_part_sz], end_part_sz *
sizeof(T));
104 memcpy(&_buffer[end_part_sz], &buffer[0], write *
sizeof(T));
106 _last.store(_pending_write, std::memory_order_relaxed);
107 _write.store(_pending_write, std::memory_order_release);
// try_produce: producer-side reservation. On success returns a pointer to a
// contiguous region of up to *n writable elements and records the reservation
// in _pending_write; the caller commits with produced(). Several interior
// lines (the computation of `real_n`, the failure return, one wraparound
// return) are missing from this view.
123 T *try_produce(
size_t *n) {
// Relaxed load of the producer's own cursor; acquire on the consumer's
// cursor pairs with the release store in consumed(), so freed slots are
// safe to overwrite.
127 size_t write = _write.load(std::memory_order_relaxed);
128 size_t read = _read.load(std::memory_order_acquire);
// No other reservation may be outstanding on the producer side.
129 assert(_pending_write == write);
// Region fits (or is clamped to fit) before the physical end of the buffer.
// NOTE(review): the condition and the clamp on the next line look mutually
// inconsistent in this extraction — verify both against the original file.
133 if (write + real_n <= _size) {
134 real_n = _size - write;
136 _wraparound_write =
false;
137 _pending_write = write + real_n;
138 return &_buffer[write];
139 }
// Wrap: reserve [0, real_n) at the start of the buffer, provided the
// consumer has freed enough room there (strict < keeps one slot as the
// mandatory empty gap).
else if (real_n < read) {
142 _wraparound_write =
true;
143 _pending_write = real_n;
// Non-wrapping reservation bounded by the consumer's cursor; the -1 keeps
// the mandatory empty slot between write and read.
149 if (write + real_n < read) {
150 real_n = read - write - 1;
152 _wraparound_write =
false;
153 _pending_write = write + real_n;
154 return &_buffer[write];
// produced: producer-side commit of n elements previously reserved by
// try_produce. Publishes the new write cursor with release so the consumer's
// acquire load in try_consume observes the written data.
165 void produced(
size_t n) {
166 size_t write = _write.load(std::memory_order_relaxed);
167 size_t pending_write;
168 if (_wraparound_write) {
// On a wrapping commit, record where valid data stopped before the wrap
// so the consumer knows where the tail segment ends.
170 _last.store(write, std::memory_order_relaxed);
// NOTE(review): part of the if/else structure is on lines missing from
// this view; this is the cursor advance for the non-wrap path.
172 pending_write = write + n;
// Partial commits are allowed: n may be smaller than the reservation.
175 assert(_pending_write >= pending_write);
// Keep _last at the high-water mark of valid data.
176 if (pending_write > _last.load(std::memory_order_relaxed)) {
177 _last.store(pending_write, std::memory_order_relaxed);
179 _pending_write = pending_write;
180 _write.store(pending_write, std::memory_order_release);
// try_consume: consumer-side peek. On success returns a pointer to a
// contiguous region of up to *n readable elements and records the position in
// _pending_read; the caller commits with consumed(). Interior lines (the
// empty-buffer early-out, the *n write-back, parts of the wrapped branch) are
// missing from this view.
190 T *try_consume(
size_t *n) {
194 size_t read = _read.load(std::memory_order_relaxed);
// No other consumption may be outstanding on the consumer side.
195 assert(_pending_read == read);
// Acquire pairs with the release store in produced(): everything written
// before the producer advanced _write is visible after this load.
198 size_t write = _write.load(std::memory_order_acquire);
203 }
// Contiguous case: readable region is [read, write), clamped to real_n.
else if (read < write) {
204 if (read + real_n > write) {
205 real_n = write - read;
208 _pending_read = read + real_n;
209 return &_buffer[read];
// Wrapped case: the tail segment ends at _last, data continues at index 0.
211 size_t last = _last.load(std::memory_order_relaxed);
// NOTE(review): the branch structure below has missing lines (212-229 are
// partly absent); verify the exact conditions against the original file.
217 if (real_n > write) {
221 _pending_read = real_n;
223 }
else if (read + real_n < last) {
225 _pending_read = read + real_n;
226 return &_buffer[read];
230 return &_buffer[read];
// consumed() commit fragment (signature line missing from this view): hands
// the consumed slots back to the producer. The release store pairs with the
// acquire load of _read in try_produce().
241 _read.store(_pending_read, std::memory_order_release);
// Producer-/consumer-private cursors mirroring the atomics, so each side can
// track its in-flight reservation between try_* and the matching commit.
248 size_t _pending_read;
249 size_t _pending_write;
// Set by try_produce when the pending reservation wraps back to index 0.
250 bool _wraparound_write;
// NOTE(review): the `#if 0 &&` deliberately disables the standard constant
// (toolchain support for std::hardware_destructive_interference_size is
// uneven); the fallback below hard-codes a 64-byte cache line.
253 #if 0 && __cpp_lib_hardware_interference_size 254 static constexpr
size_t hardware_destructive_interference_size = std::hardware_destructive_interference_size;
257 static constexpr
size_t hardware_destructive_interference_size = 64;
// Each shared atomic on its own cache line to avoid false sharing between
// the producer and consumer threads.
260 alignas(hardware_destructive_interference_size) std::atomic<size_t> _read;
261 alignas(hardware_destructive_interference_size) std::atomic<size_t> _write;
262 alignas(hardware_destructive_interference_size) std::atomic<size_t> _last;
Definition: ringbuffer.h:35