std::atomic - 2nd batch of changes to convert from glib to std::atomic

Paul Davis 2023-02-17 11:34:35 -07:00
parent c304edd253
commit a486fba3e9
30 changed files with 179 additions and 233 deletions

View file

@@ -118,7 +118,7 @@ class LIBARDOUR_API AudioEngine : public PortManager, public SessionHandlePtr
bool is_realtime() const;
// for a user holding the state_lock to check whether a reset operation is pending
bool is_reset_requested() const { return g_atomic_int_get (const_cast<std::atomic<int>*> (&_hw_reset_request_count)); }
bool is_reset_requested() const { return _hw_reset_request_count.load(); }
int set_device_name (const std::string&);
int set_sample_rate (float);
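
Note on the conversion pattern used throughout this commit: g_atomic_int_get() implies a full memory barrier, while std::atomic<T>::load() defaults to memory_order_seq_cst, which is at least as strong, so the replacement preserves the old semantics. A minimal standalone sketch (illustrative only, not part of the commit):

#include <atomic>

static std::atomic<int> hw_reset_request_count (0); // hypothetical stand-in for the member

static bool is_reset_requested_sketch ()
{
	// plain seq_cst load; equivalent to (and no weaker than) g_atomic_int_get()
	return hw_reset_request_count.load () != 0;
}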

View file

@@ -109,7 +109,7 @@ public:
void start_touch (timepos_t const & when);
void stop_touch (timepos_t const & when);
bool touching () const { return g_atomic_int_get (const_cast<std::atomic<int>*>(&_touching)) != 0; }
bool touching () const { return _touching.load() != 0; }
bool writing () const { return _state == Write; }
bool touch_enabled () const { return _state & (Touch | Latch); }
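
Why the const_cast can be dropped in these const accessors: std::atomic<T>::load() is itself a const member function, so it can be called on an atomic data member from a const method directly, whereas g_atomic_int_get() required a mutable pointer. Sketch (names illustrative):

#include <atomic>

struct TouchStateSketch {
	std::atomic<int> _touching;
	TouchStateSketch () : _touching (0) {}
	bool touching () const { return _touching.load () != 0; } // no const_cast needed
};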

View file

@@ -20,6 +20,7 @@
#define _ardour_circular_buffer_h_
#include <atomic>
#include <cstdint>
#include "pbd/ringbuffer.h"
@@ -36,11 +37,11 @@ namespace ARDOUR {
class LIBARDOUR_API CircularSampleBuffer
{
public:
CircularSampleBuffer (samplecnt_t size);
CircularSampleBuffer (size_t size);
void silence (samplecnt_t);
void write (Sample const*, samplecnt_t);
bool read (Sample& s_min, Sample& s_max, samplecnt_t n_samples);
void silence (size_t);
void write (Sample const*, size_t);
bool read (Sample& s_min, Sample& s_max, size_t n_samples);
private:
PBD::RingBuffer<Sample> _rb;
@@ -59,7 +60,7 @@ public:
typedef std::vector<Event> EventList;
CircularEventBuffer (samplecnt_t size);
CircularEventBuffer (size_t size);
~CircularEventBuffer ();
void reset ();
@@ -70,11 +71,11 @@ private:
CircularEventBuffer (CircularEventBuffer const&);
Event* _buf;
guint _size;
guint _size_mask;
size_t _size;
size_t _size_mask;
std::atomic<int> _idx;
std::atomic<int> _ack;
std::atomic<size_t> _idx;
std::atomic<size_t> _ack;
};
}
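
With _size a power of two and _size_mask equal to _size - 1, indices wrap with a bit-mask instead of a modulo; widening _idx and _ack to std::atomic<size_t> keeps them the same width as the mask. A hedged sketch of the wrap-around update (illustrative names):

#include <atomic>
#include <cstddef>

struct IndexSketch {
	size_t _size_mask; // _size - 1, with _size a power of two
	std::atomic<size_t> _idx;

	explicit IndexSketch (size_t power_of_two_size)
		: _size_mask (power_of_two_size - 1), _idx (0) {}

	void advance ()
	{
		// single-writer update: load, wrap with the mask, publish
		_idx.store ((_idx.load () + 1) & _size_mask);
	}
};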

View file

@@ -85,8 +85,8 @@ public:
std::list<std::shared_ptr<Source> >& last_capture_sources () { return _last_capture_sources; }
bool record_enabled () const { return g_atomic_int_get (const_cast<std::atomic<int>*>(&_record_enabled)); }
bool record_safe () const { return g_atomic_int_get (const_cast<std::atomic<int>*>(&_record_safe)); }
bool record_enabled () const { return _record_enabled.load(); }
bool record_safe () const { return _record_safe.load(); }
void set_record_enabled (bool yn);
void set_record_safe (bool yn);

View file

@@ -18,8 +18,9 @@
#ifndef _dsp_filter_h_
#define _dsp_filter_h_
#include <stdint.h>
#include <string.h>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <assert.h>
#include <glib.h>
#include <glibmm.h>
@@ -112,7 +113,8 @@ namespace ARDOUR { namespace DSP {
* @param val value to set
*/
void atomic_set_int (size_t off, int32_t val) {
g_atomic_int_set (&(((int32_t*)_data)[off]), val);
((int32_t*)_data)[off] = val;
std::atomic_thread_fence (std::memory_order_release);
}
/** atomically read integer at offset
@@ -125,7 +127,8 @@
* @returns value at offset
*/
int32_t atomic_get_int (size_t off) {
return g_atomic_int_get (&(((int32_t*)_data)[off]));
std::atomic_thread_fence (std::memory_order_acquire);
return (((int32_t*)_data)[off]);
}
private:
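
The replacement pairs a plain write with a release fence and a plain read with an acquire fence. Unlike g_atomic_int_set()/g_atomic_int_get(), the access itself is no longer formally atomic in C++ terms; this relies on aligned 32-bit loads and stores being single instructions on the supported targets. A standalone sketch of the fence pattern (illustrative only):

#include <atomic>
#include <cstdint>

static int32_t shared_word = 0; // hypothetical shared slot

static void set_word (int32_t val)
{
	shared_word = val; // plain store...
	std::atomic_thread_fence (std::memory_order_release); // ...made visible by the fence
}

static int32_t get_word ()
{
	std::atomic_thread_fence (std::memory_order_acquire); // order the following read
	return shared_word;
}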

View file

@@ -107,25 +107,25 @@ private:
void helper_thread ();
PBD::MPMCQueue<ProcessNode*> _trigger_queue; ///< nodes that can be processed
std::atomic<unsigned int> _trigger_queue_size; ///< number of entries in trigger-queue
std::atomic<uint32_t> _trigger_queue_size; ///< number of entries in trigger-queue
/** Start worker threads */
PBD::Semaphore _execution_sem;
/** The number of processing threads that are asleep */
std::atomic<unsigned int> _idle_thread_cnt;
std::atomic<uint32_t> _idle_thread_cnt;
/** Signalled to start a run of the graph for a process callback */
PBD::Semaphore _callback_start_sem;
PBD::Semaphore _callback_done_sem;
/** The number of unprocessed nodes that do not feed any other node; updated during processing */
std::atomic<unsigned int> _terminal_refcnt;
std::atomic<uint32_t> _terminal_refcnt;
bool _graph_empty;
/* number of background worker threads >= 0 */
std::atomic<unsigned int> _n_workers;
std::atomic<uint32_t> _n_workers;
/* flag to terminate background threads */
std::atomic<int> _terminate;

View file

@@ -73,17 +73,17 @@ public:
/** Atomically get both the channel mode and mask. */
void get_mode_and_mask(ChannelMode* mode, uint16_t* mask) const {
const uint32_t mm = g_atomic_int_get (const_cast<std::atomic<unsigned int>*> (&_mode_mask));
const uint32_t mm = _mode_mask.load();
*mode = static_cast<ChannelMode>((mm & 0xFFFF0000) >> 16);
*mask = (mm & 0x0000FFFF);
}
ChannelMode get_channel_mode() const {
return static_cast<ChannelMode>((g_atomic_int_get (const_cast<std::atomic<unsigned int>*> (&_mode_mask)) & 0xFFFF0000) >> 16);
return static_cast<ChannelMode> ((_mode_mask.load() & 0xFFFF0000) >> 16);
}
uint16_t get_channel_mask() const {
return g_atomic_int_get (const_cast<std::atomic<unsigned int>*> (&_mode_mask)) & 0x0000FFFF;
return _mode_mask.load() & 0x0000FFFF;
}
PBD::Signal0<void> ChannelMaskChanged;
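
Packing the 16-bit mode and mask into one 32-bit atomic word lets a single load observe both values consistently; there is no window where a reader could combine a mode from one update with a mask from another. A standalone sketch of the round-trip, mirroring the accessors above:

#include <atomic>
#include <cstdint>

static std::atomic<uint32_t> mode_mask (0x0000FFFF);

static void set_mode_and_mask (uint16_t mode, uint16_t mask)
{
	mode_mask.store ((uint32_t (mode) << 16) | uint32_t (mask));
}

static void get_mode_and_mask (uint16_t& mode, uint16_t& mask)
{
	const uint32_t mm = mode_mask.load (); // one atomic read, never a torn pair
	mode = (uint16_t) ((mm & 0xFFFF0000) >> 16);
	mask = (uint16_t) (mm & 0x0000FFFF);
}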

View file

@@ -66,7 +66,7 @@ public:
void start_touch (timepos_t const & when);
void stop_touch (timepos_t const & when);
bool touching() const { return g_atomic_int_get (const_cast<std::atomic<int>*> (&_touching)); }
bool touching() const { return _touching.load(); }
bool writing() const { return _auto_state == Write; }
bool touch_enabled() const { return _auto_state & (Touch | Latch); }

View file

@@ -140,7 +140,7 @@ public:
virtual void inc_use_count ();
virtual void dec_use_count ();
int use_count() const { return g_atomic_int_get (const_cast<std::atomic<int>*> (&_use_count)); }
int use_count() const { return _use_count.load(); }
bool used() const { return use_count() > 0; }
uint32_t level() const { return _level; }

View file

@@ -495,7 +495,7 @@ AUPlugin::discover_factory_presets ()
void
AUPlugin::init ()
{
g_atomic_int_set (&_current_latency, UINT_MAX);
_current_latency.store (UINT_MAX);
OSErr err;
@@ -763,7 +763,7 @@ AUPlugin::plugin_latency () const
guint lat = _current_latency.load ();
if (lat == UINT_MAX) {
lat = unit->Latency() * _session.sample_rate();
g_atomic_int_set (&_current_latency, lat);
_current_latency.store (lat);
}
return lat;
}
@@ -2660,7 +2660,7 @@ AUPlugin::parameter_change_listener (void* /*arg*/, void* src, const AudioUnitEv
if (event->mArgument.mProperty.mPropertyID == kAudioUnitProperty_Latency) {
DEBUG_TRACE (DEBUG::AudioUnitConfig, string_compose("AU Latency Change Event %1 <> %2\n", new_value, unit->Latency()));
guint lat = unit->Latency() * _session.sample_rate();
g_atomic_int_set (&_current_latency, lat);
_current_latency.store (lat);
}
return;
}
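
The latency cache uses UINT_MAX as a "not yet computed" sentinel: init() stores the sentinel, the first plugin_latency() call computes and caches the real value, and the property listener overwrites the cache when the AU reports a change. A standalone sketch of the pattern (names assumed):

#include <atomic>
#include <climits>

static std::atomic<unsigned int> current_latency (UINT_MAX);

static unsigned int plugin_latency_sketch (unsigned int freshly_computed)
{
	unsigned int lat = current_latency.load ();
	if (lat == UINT_MAX) {
		lat = freshly_computed; // stands in for unit->Latency() * sample_rate
		current_latency.store (lat);
	}
	return lat;
}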

View file

@@ -95,7 +95,7 @@ AutomationList::AutomationList (const AutomationList& other)
, _before (0)
{
_state = other._state;
g_atomic_int_set (&_touching, other.touching());
_touching.store (other.touching());
create_curve_if_necessary();
@@ -108,7 +108,7 @@ AutomationList::AutomationList (const AutomationList& other, timepos_t const & s
, _before (0)
{
_state = other._state;
g_atomic_int_set (&_touching, other.touching());
_touching.store (other.touching());
create_curve_if_necessary();

View file

@@ -21,15 +21,15 @@
using namespace ARDOUR;
CircularSampleBuffer::CircularSampleBuffer (samplecnt_t size)
CircularSampleBuffer::CircularSampleBuffer (size_t size)
: _rb (size)
{
}
void
CircularSampleBuffer::write (Sample const* buf, samplecnt_t n_samples)
CircularSampleBuffer::write (Sample const* buf, size_t n_samples)
{
ssize_t ws = (ssize_t) _rb.write_space ();
size_t ws = _rb.write_space ();
if (ws < n_samples) {
/* overwrite old data (consider a spinlock wrt ::read) */
_rb.increment_read_idx (n_samples - ws);
@@ -38,16 +38,16 @@ CircularSampleBuffer::write (Sample const* buf, samplecnt_t n_samples)
}
void
CircularSampleBuffer::silence (samplecnt_t n_samples)
CircularSampleBuffer::silence (size_t n_samples)
{
ssize_t ws = (ssize_t) _rb.write_space ();
size_t ws = _rb.write_space ();
if (ws < n_samples) {
/* overwrite old data (consider a spinlock wrt ::read) */
_rb.increment_read_idx (n_samples - ws);
}
PBD::RingBuffer<Sample>::rw_vector vec;
_rb.get_write_vector (&vec);
if (vec.len[0] >= n_samples) {
if (vec.len[0] >= (size_t) n_samples) {
memset (vec.buf[0], 0, sizeof (Sample) * n_samples);
} else {
assert (vec.len[0] > 0 && vec.len[0] + vec.len[1] >= n_samples);
@@ -58,7 +58,7 @@ CircularSampleBuffer::silence (samplecnt_t n_samples)
}
bool
CircularSampleBuffer::read (Sample& s_min, Sample& s_max, samplecnt_t spp)
CircularSampleBuffer::read (Sample& s_min, Sample& s_max, size_t spp)
{
s_min = s_max = 0;
@@ -72,10 +72,10 @@ CircularSampleBuffer::read (Sample& s_min, Sample& s_max, samplecnt_t spp)
/* immediately mark as read, allow writer to overwrite data if needed */
_rb.increment_read_idx (spp);
samplecnt_t to_proc = std::min (spp, (samplecnt_t)vec.len[0]);
size_t to_proc = std::min (spp, vec.len[0]);
ARDOUR::find_peaks (vec.buf[0], to_proc, &s_min, &s_max);
to_proc = std::min (spp - to_proc, (samplecnt_t)vec.len[1]);
to_proc = std::min (spp - to_proc, vec.len[1]);
if (to_proc > 0) { // XXX is this check needed?
ARDOUR::find_peaks (vec.buf[1], to_proc, &s_min, &s_max);
}
@@ -111,7 +111,7 @@ CircularEventBuffer::Event::Event (uint8_t const* buf, size_t size)
pad = 0;
}
CircularEventBuffer::CircularEventBuffer (samplecnt_t size)
CircularEventBuffer::CircularEventBuffer (size_t size)
{
guint power_of_two;
for (power_of_two = 1; 1U << power_of_two < size; ++power_of_two) {}
@@ -142,7 +142,7 @@ CircularEventBuffer::write (uint8_t const* buf, size_t size)
guint write_idx = _idx.load ();
memcpy (&_buf[write_idx], &e, sizeof (Event));
write_idx = (write_idx + 1) & _size_mask;
g_atomic_int_set (&_idx, write_idx);
_idx.store (write_idx);
_ack.store (1);
}
@@ -150,7 +150,7 @@ bool
CircularEventBuffer::read (EventList& l)
{
guint to_read = _size_mask;
int canderef (1);
size_t canderef (1);
if (!_ack.compare_exchange_strong (canderef, 0)) {
return false;
}

View file

@@ -687,10 +687,10 @@ DiskReader::overwrite_existing_audio ()
const bool reversed = !_session.transport_will_roll_forwards ();
sampleoffset_t chunk1_offset;
samplecnt_t chunk1_cnt;
samplecnt_t chunk2_cnt;
size_t chunk1_cnt;
size_t chunk2_cnt;
const samplecnt_t to_overwrite = c->front()->rbuf->overwritable_at (overwrite_offset);
const size_t to_overwrite = c->front()->rbuf->overwritable_at (overwrite_offset);
chunk1_offset = overwrite_offset;
chunk1_cnt = min (c->front()->rbuf->bufsize() - overwrite_offset, to_overwrite);
@@ -728,7 +728,7 @@ DiskReader::overwrite_existing_audio ()
start = overwrite_sample;
if (chunk1_cnt) {
if (audio_read (buf + chunk1_offset, mixdown_buffer.get (), gain_buffer.get (), start, chunk1_cnt, rci, n, reversed) != chunk1_cnt) {
if (audio_read (buf + chunk1_offset, mixdown_buffer.get (), gain_buffer.get (), start, chunk1_cnt, rci, n, reversed) != (samplecnt_t) chunk1_cnt) {
error << string_compose (_("DiskReader %1: when overwriting(1), cannot read %2 from playlist at sample %3"), id (), chunk1_cnt, overwrite_sample) << endmsg;
ret = false;
continue;
@@ -737,7 +737,7 @@ DiskReader::overwrite_existing_audio ()
}
if (chunk2_cnt) {
if (audio_read (buf, mixdown_buffer.get (), gain_buffer.get (), start, chunk2_cnt, rci, n, reversed) != chunk2_cnt) {
if (audio_read (buf, mixdown_buffer.get (), gain_buffer.get (), start, chunk2_cnt, rci, n, reversed) != (samplecnt_t) chunk2_cnt) {
error << string_compose (_("DiskReader %1: when overwriting(2), cannot read %2 from playlist at sample %3"), id (), chunk2_cnt, overwrite_sample) << endmsg;
ret = false;
}
@@ -837,7 +837,7 @@ DiskReader::seek (samplepos_t sample, bool complete_refill)
return 0;
}
if (abs (sample - playback_sample) < (c->front ()->rbuf->reserved_size () / 6)) {
if ((size_t) abs (sample - playback_sample) < (c->front ()->rbuf->reserved_size () / 6)) {
/* we're close enough. Note: this is a heuristic */
return 0;
}
@@ -865,7 +865,8 @@ DiskReader::seek (samplepos_t sample, bool complete_refill)
* samples.
*/
samplecnt_t shift = sample > c->front ()->rbuf->reservation_size () ? c->front ()->rbuf->reservation_size () : sample;
const samplecnt_t rsize = (samplecnt_t) c->front()->rbuf->reservation_size();
samplecnt_t shift = (sample > rsize ? rsize : sample);
if (read_reversed) {
/* reading in reverse, so start at a later sample, and read

View file

@@ -385,7 +385,7 @@ DiskWriter::set_state (const XMLNode& node, int version)
int rec_safe = 0;
node.get_property (X_("record-safe"), rec_safe);
g_atomic_int_set (&_record_safe, rec_safe);
_record_safe.store (rec_safe);
reset_write_sources (false, true);

View file

@@ -60,8 +60,6 @@ alloc_allowed ()
}
#endif
#define g_atomic_uint_get(x) static_cast<guint> (g_atomic_int_get (x))
Graph::Graph (Session& session)
: SessionHandleRef (session)
, _execution_sem ("graph_execution", 0)
@@ -107,7 +105,7 @@ void
Graph::reset_thread_list ()
{
uint32_t num_threads = how_many_dsp_threads ();
guint n_workers = g_atomic_uint_get (&_n_workers);
uint32_t n_workers = _n_workers.load();
/* don't bother doing anything here if we already have the right
* number of threads.
@@ -136,7 +134,7 @@ Graph::reset_thread_list ()
}
}
while (g_atomic_uint_get (&_n_workers) + 1 != num_threads) {
while (_n_workers.load() + 1 != num_threads) {
sched_yield ();
}
}
@@ -144,7 +142,7 @@
uint32_t
Graph::n_threads () const
{
return 1 + g_atomic_uint_get (&_n_workers);
return 1 + _n_workers.load();
}
void
@@ -165,8 +163,8 @@ Graph::drop_threads ()
_terminate.store (1);
/* Wake-up sleeping threads */
guint tc = g_atomic_uint_get (&_idle_thread_cnt);
assert (tc == g_atomic_uint_get (&_n_workers));
uint32_t tc = _idle_thread_cnt.load();
assert (tc == _n_workers.load());
for (guint i = 0; i < tc; ++i) {
_execution_sem.signal ();
}
@@ -213,14 +211,14 @@ Graph::prep ()
_graph_empty = false;
}
assert (g_atomic_uint_get (&_trigger_queue_size) == 0);
assert (_trigger_queue_size.load() == 0);
assert (_graph_empty != (_graph_chain->_n_terminal_nodes > 0));
if (_trigger_queue.capacity () < _graph_chain->_nodes_rt.size ()) {
_trigger_queue.reserve (_graph_chain->_nodes_rt.size ());
}
g_atomic_int_set (&_terminal_refcnt, _graph_chain->_n_terminal_nodes);
_terminal_refcnt.store (_graph_chain->_n_terminal_nodes);
/* Trigger the initial nodes for processing, which are the ones at the `input' end */
for (auto const& i : _graph_chain->_init_trigger_list) {
@@ -248,7 +246,7 @@ Graph::reached_terminal_node ()
/* We have run all the nodes that are at the `output' end of
* the graph, so there is nothing more to do this time around.
*/
assert (g_atomic_uint_get (&_trigger_queue_size) == 0);
assert (_trigger_queue_size.load() == 0);
/* Notify caller */
DEBUG_TRACE (DEBUG::ProcessThreads, string_compose ("%1 cycle done.\n", pthread_name ()));
@@ -260,8 +258,8 @@ Graph::reached_terminal_node ()
* If there are more threads than CPU cores, some worker-
* threads may only be "on the way" to become idle.
*/
guint n_workers = g_atomic_uint_get (&_n_workers);
while (g_atomic_uint_get (&_idle_thread_cnt) != n_workers) {
uint32_t n_workers = _n_workers.load();
while (_idle_thread_cnt.load() != n_workers) {
sched_yield ();
}
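
The rendezvous idiom above polls worker-published counters with (default seq_cst) loads, yielding between polls. Hedged standalone sketch:

#include <atomic>
#include <cstdint>
#include <sched.h>

static std::atomic<uint32_t> idle_thread_cnt (0);
static std::atomic<uint32_t> n_workers (0);

static void wait_until_all_idle ()
{
	// spin until every worker has parked itself on the semaphore
	while (idle_thread_cnt.load () != n_workers.load ()) {
		sched_yield ();
	}
}
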
@@ -303,9 +301,9 @@ Graph::run_one ()
* other threads.
* This thread has not yet decreased _trigger_queue_size.
*/
guint idle_cnt = g_atomic_uint_get (&_idle_thread_cnt);
guint work_avail = g_atomic_uint_get (&_trigger_queue_size);
guint wakeup = std::min (idle_cnt + 1, work_avail);
uint32_t idle_cnt = _idle_thread_cnt.load();
uint32_t work_avail = _trigger_queue_size.load();
uint32_t wakeup = std::min (idle_cnt + 1, work_avail);
DEBUG_TRACE (DEBUG::ProcessThreads, string_compose ("%1 signals %2 threads\n", pthread_name (), wakeup));
for (guint i = 1; i < wakeup; ++i) {
@@ -316,7 +314,7 @@ Graph::run_one ()
while (!to_run) {
/* Wait for work, fall asleep */
_idle_thread_cnt.fetch_add (1);
assert (g_atomic_uint_get (&_idle_thread_cnt) <= g_atomic_uint_get (&_n_workers));
assert (_idle_thread_cnt.load() <= _n_workers.load());
DEBUG_TRACE (DEBUG::ProcessThreads, string_compose ("%1 goes to sleep\n", pthread_name ()));
_execution_sem.wait ();
@@ -352,7 +350,7 @@ void
Graph::helper_thread ()
{
_n_workers.fetch_add (1);
guint id = g_atomic_uint_get (&_n_workers);
uint32_t id = _n_workers.load();
/* This is needed for ARDOUR::Session requests called from rt-processors;
* in particular, Lua scripts may do cross-thread calls */
@@ -579,15 +577,15 @@ Graph::in_process_thread () const
void
Graph::process_tasklist (RTTaskList const& rt)
{
assert (g_atomic_uint_get (&_trigger_queue_size) == 0);
assert (_trigger_queue_size.load() == 0);
std::vector<RTTask> const& tasks = rt.tasks ();
if (tasks.empty ()) {
return;
}
g_atomic_int_set (&_trigger_queue_size, tasks.size ());
g_atomic_int_set (&_terminal_refcnt, tasks.size ());
_trigger_queue_size.store (tasks.size ());
_terminal_refcnt.store (tasks.size ());
_graph_empty = false;
for (auto const& t : tasks) {

View file

@@ -58,7 +58,7 @@ void
GraphNode::prep (GraphChain const* chain)
{
/* This is the number of nodes that directly feed us */
g_atomic_int_set (&_refcount, init_refcount (chain));
_refcount.store (init_refcount (chain));
}
void
@@ -76,7 +76,7 @@ GraphNode::trigger ()
if (PBD::atomic_dec_and_test (_refcount)) {
#if 0 // TODO optimize: remove prep()
/* reset reference count for next cycle */
g_atomic_int_set (&_refcount, _init_refcount[chain]);
_refcount.store (_init_refcount[chain]);
#endif
/* All nodes that feed this node have completed, so this node can be processed now. */
_graph->trigger (this);

View file

@@ -25,7 +25,7 @@ namespace ARDOUR {
MidiChannelFilter::MidiChannelFilter()
{
g_atomic_int_set (&_mode_mask, 0x0000FFFF);
_mode_mask.store (0x0000FFFF);
}
void
@@ -115,7 +115,7 @@ MidiChannelFilter::set_channel_mode(ChannelMode mode, uint16_t mask)
if (old_mode != mode || old_mask != mask) {
mask = force_mask(mode, mask);
g_atomic_int_set (&_mode_mask, (uint32_t(mode) << 16) | uint32_t(mask));
_mode_mask.store ((uint32_t(mode) << 16) | uint32_t(mask));
ChannelModeChanged();
return true;
}
@@ -132,7 +132,7 @@ MidiChannelFilter::set_channel_mask(uint16_t mask)
if (old_mask != mask) {
mask = force_mask(mode, mask);
g_atomic_int_set (&_mode_mask, (uint32_t(mode) << 16) | uint32_t(mask));
_mode_mask.store ((uint32_t(mode) << 16) | uint32_t(mask));
ChannelMaskChanged();
return true;
}

View file

@@ -4133,7 +4133,7 @@ Route::apply_processor_changes_rt ()
update_signal_latency (true);
}
if (emissions != 0) {
g_atomic_int_set (&_pending_signals, emissions);
_pending_signals.store (emissions);
return true;
}
return (!selfdestruct_sequence.empty ());

View file

@@ -1499,7 +1499,7 @@ Session::reset_punch_loop_constraint ()
if (_punch_or_loop.load () == NoConstraint) {
return;
}
g_atomic_int_set (&_punch_or_loop, NoConstraint);
_punch_or_loop.store (NoConstraint);
PunchLoopConstraintChange (); /* EMIT SIGNAL */
}
@@ -1972,11 +1972,11 @@ Session::disable_record (bool rt_context, bool force)
if ((rs = (RecordState) _record_status.load ()) != Disabled) {
if (!Config->get_latched_record_enable () || force) {
g_atomic_int_set (&_record_status, Disabled);
_record_status.store (Disabled);
send_immediate_mmc (MIDI::MachineControlCommand (MIDI::MachineControl::cmdRecordExit));
} else {
if (rs == Recording) {
g_atomic_int_set (&_record_status, Enabled);
_record_status.store (Enabled);
}
}
@@ -2010,7 +2010,7 @@ Session::maybe_enable_record (bool rt_context)
return;
}
g_atomic_int_set (&_record_status, Enabled);
_record_status.store (Enabled);
// TODO make configurable, perhaps capture-buffer-seconds dependent?
bool quick_start = true;
@@ -6322,7 +6322,7 @@ Session::update_route_record_state ()
int const old = _have_rec_enabled_track.load ();
g_atomic_int_set (&_have_rec_enabled_track, i != rl->end () ? 1 : 0);
_have_rec_enabled_track.store (i != rl->end () ? 1 : 0);
if (_have_rec_enabled_track.load () != old) {
RecordStateChanged (); /* EMIT SIGNAL */
@@ -6335,7 +6335,7 @@ Session::update_route_record_state ()
}
}
g_atomic_int_set (&_have_rec_disabled_track, i != rl->end () ? 1 : 0);
_have_rec_disabled_track.store (i != rl->end () ? 1 : 0);
bool record_arm_state_changed = (old != _have_rec_enabled_track.load () );

View file

@@ -151,7 +151,7 @@ Session::mmc_record_strobe (MIDI::MachineControl &/*mmc*/)
*/
save_state ("", true);
g_atomic_int_set (&_record_status, Enabled);
_record_status.store (Enabled);
RecordStateChanged (); /* EMIT SIGNAL */
request_roll (TRS_MMC);

View file

@@ -320,8 +320,8 @@ Session::get_track_statistics ()
cworst = min (cworst, tr->capture_buffer_load());
}
g_atomic_int_set (&_playback_load, (uint32_t) floor (pworst * 100.0f));
g_atomic_int_set (&_capture_load, (uint32_t) floor (cworst * 100.0f));
_playback_load.store ((uint32_t) floor (pworst * 100.0f));
_capture_load.store ((uint32_t) floor (cworst * 100.0f));
if (actively_recording()) {
set_dirty();
@@ -1074,7 +1074,7 @@ Session::process_event (SessionEvent* ev)
break;
case SessionEvent::SetTimecodeTransmission:
g_atomic_int_set (&_suspend_timecode_transmission, ev->yes_or_no ? 0 : 1);
_suspend_timecode_transmission.store (ev->yes_or_no ? 0 : 1);
break;
case SessionEvent::SyncCues:

View file

@@ -182,9 +182,9 @@ Session::pre_engine_init (string fullpath)
timerclear (&last_mmc_step);
_processing_prohibited.store (0);
g_atomic_int_set (&_record_status, Disabled);
g_atomic_int_set (&_playback_load, 100);
g_atomic_int_set (&_capture_load, 100);
_record_status.store (Disabled);
_playback_load.store (100);
_capture_load.store (100);
set_next_event ();
_all_route_group->set_active (true, this);

View file

@@ -154,8 +154,8 @@ Session::realtime_stop (bool abort, bool clear_state)
reset_punch_loop_constraint ();
g_atomic_int_set (&_playback_load, 100);
g_atomic_int_set (&_capture_load, 100);
_playback_load.store (100);
_capture_load.store (100);
if (config.get_use_video_sync()) {
waiting_for_sync_offset = true;
@@ -1118,7 +1118,7 @@ Session::butler_transport_work (bool have_process_lock)
restart:
std::shared_ptr<RouteList> r = routes.reader ();
int on_entry = g_atomic_int_get (&_butler->should_do_transport_work);
int on_entry = _butler->should_do_transport_work.load();
bool finished = true;
PostTransportWork ptw = post_transport_work();
#ifndef NDEBUG
@@ -1212,7 +1212,7 @@ Session::non_realtime_overwrite (int on_entry, bool& finished, bool update_loop_
if (tr && tr->pending_overwrite ()) {
tr->overwrite_existing_buffers ();
}
if (on_entry != g_atomic_int_get (&_butler->should_do_transport_work)) {
if (on_entry != _butler->should_do_transport_work.load()) {
finished = false;
return;
}
@@ -1279,14 +1279,14 @@ Session::non_realtime_locate ()
std::cerr << "locate to " << tf << " took " << (end - start) << " usecs for " << nt << " tracks = " << usecs_per_track << " per track\n";
#endif
if (usecs_per_track > _current_usecs_per_track.load ()) {
g_atomic_int_set (&_current_usecs_per_track, usecs_per_track);
_current_usecs_per_track.store (usecs_per_track);
}
}
/* we've caught up with whatever the _seek_counter was when we did the
non-realtime locates.
*/
g_atomic_int_set (&_butler_seek_counter, sc);
_butler_seek_counter.store (sc);
{
/* VCAs are quick to locate because they have no data (except
@@ -1510,7 +1510,7 @@ Session::non_realtime_stop (bool abort, int on_entry, bool& finished, bool will_
DEBUG_TRACE (DEBUG::Transport, string_compose ("Butler PTW: locate on %1\n", (*i)->name()));
(*i)->non_realtime_locate (_transport_sample);
if (on_entry != g_atomic_int_get (&_butler->should_do_transport_work)) {
if (on_entry != _butler->should_do_transport_work.load()) {
finished = false;
/* we will be back */
return;
@@ -1998,7 +1998,8 @@ Session::sync_source_changed (SyncSource type, samplepos_t pos, pframes_t cycle_
mtc_master->ActiveChanged.connect_same_thread (mtc_status_connection, boost::bind (&Session::mtc_status_changed, this, _1));
MTCSyncStateChanged(mtc_master->locked() );
} else {
if (g_atomic_int_compare_and_exchange (&_mtc_active, 1, 0)) {
int canderef (1);
if (_mtc_active.compare_exchange_strong (canderef, 0)) {
MTCSyncStateChanged( false );
}
mtc_status_connection.disconnect ();
@@ -2010,7 +2011,8 @@ Session::sync_source_changed (SyncSource type, samplepos_t pos, pframes_t cycle_
ltc_master->ActiveChanged.connect_same_thread (ltc_status_connection, boost::bind (&Session::ltc_status_changed, this, _1));
LTCSyncStateChanged (ltc_master->locked() );
} else {
if (g_atomic_int_compare_and_exchange (&_ltc_active, 1, 0)) {
int canderef (1);
if (_ltc_active.compare_exchange_strong (canderef, 0)) {
LTCSyncStateChanged( false );
}
ltc_status_connection.disconnect ();
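
The `canderef` locals exist because g_atomic_int_compare_and_exchange() takes the expected value by value, while compare_exchange_strong() takes it by reference and overwrites it with the observed value on failure, so a dereferenceable lvalue is required. Minimal sketch:

#include <atomic>

static std::atomic<int> active (1);

static bool deactivate_if_active ()
{
	int expected = 1; // must be an lvalue; rewritten with the current value on failure
	return active.compare_exchange_strong (expected, 0);
}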

View file

@@ -1265,7 +1265,8 @@ CoreAudioBackend::pre_process ()
bool connections_changed = false;
bool ports_changed = false;
if (!pthread_mutex_trylock (&_port_callback_mutex)) {
if (g_atomic_int_compare_and_exchange (&_port_change_flag, 1, 0)) {
int canderef (1);
if (_port_change_flag.compare_exchange_strong (canderef, 0)) {
ports_changed = true;
}
if (!_port_connection_queue.empty ()) {

View file

@@ -1733,7 +1733,8 @@ PortAudioBackend::process_port_connection_changes ()
bool connections_changed = false;
bool ports_changed = false;
if (!pthread_mutex_trylock (&_port_callback_mutex)) {
if (g_atomic_int_compare_and_exchange (&_port_change_flag, 1, 0)) {
int canderef (1);
if (_port_change_flag.compare_exchange_strong (canderef, 0)) {
ports_changed = true;
}
if (!_port_connection_queue.empty ()) {

View file

@@ -38,7 +38,7 @@ public:
atomic_counter (gint value = 0)
{
g_atomic_int_set (&m_value, value);
m_value.store (value);
}
gint get() const
@@ -48,7 +48,7 @@ public:
void set (gint new_value)
{
g_atomic_int_set (&m_value, new_value);
m_value.store (new_value);
}
void increment ()
@@ -73,12 +73,7 @@ public:
bool compare_and_exchange (gint old_value, gint new_value)
{
return g_atomic_int_compare_and_exchange
(
&m_value,
old_value,
new_value
);
return m_value.compare_exchange_strong (old_value, new_value);
}
/**

View file

@@ -20,21 +20,12 @@
#ifndef _pbd_mpc_queue_h_
#define _pbd_mpc_queue_h_
#if defined(__cplusplus) && __cplusplus >= 201103L
# define MPMC_USE_STD_ATOMIC 1
#endif
#include <cassert>
#include <stdint.h>
#include <stdlib.h>
#ifdef MPMC_USE_STD_ATOMIC
# include <atomic>
# define MPMC_QUEUE_TYPE std::atomic<size_t>
#else
# include <glib.h>
# define MPMC_QUEUE_TYPE std::atomic<unsigned int>
#endif
namespace PBD {
@@ -88,64 +79,37 @@ public:
void
clear ()
{
#ifdef MPMC_USE_STD_ATOMIC
for (size_t i = 0; i <= _buffer_mask; ++i) {
_buffer[i]._sequence.store (i, std::memory_order_relaxed);
}
_enqueue_pos.store (0, std::memory_order_relaxed);
_dequeue_pos.store (0, std::memory_order_relaxed);
#else
for (size_t i = 0; i <= _buffer_mask; ++i) {
g_atomic_int_set (&_buffer[i]._sequence, i);
}
_enqueue_pos.store (0);
_dequeue_pos.store (0);
#endif
}
bool
push_back (T const& data)
{
cell_t* cell;
#ifdef MPMC_USE_STD_ATOMIC
size_t pos = _enqueue_pos.load (std::memory_order_relaxed);
#else
guint pos = _enqueue_pos.load ();
#endif
for (;;) {
cell = &_buffer[pos & _buffer_mask];
#ifdef MPMC_USE_STD_ATOMIC
size_t seq = cell->_sequence.load (std::memory_order_acquire);
#else
guint seq = g_atomic_int_get (&cell->_sequence);
#endif
intptr_t dif = (intptr_t)seq - (intptr_t)pos;
if (dif == 0) {
#ifdef MPMC_USE_STD_ATOMIC
if (_enqueue_pos.compare_exchange_weak (pos, pos + 1, std::memory_order_relaxed))
#else
if (g_atomic_int_compare_and_exchange (&_enqueue_pos, pos, pos + 1))
#endif
{
break;
}
} else if (dif < 0) {
return false;
} else {
#ifdef MPMC_USE_STD_ATOMIC
pos = _enqueue_pos.load (std::memory_order_relaxed);
#else
pos = _enqueue_pos.load ();
#endif
}
}
cell->_data = data;
#ifdef MPMC_USE_STD_ATOMIC
cell->_sequence.store (pos + 1, std::memory_order_release);
#else
g_atomic_int_set (&cell->_sequence, pos + 1);
#endif
return true;
}
@@ -154,45 +118,26 @@ public:
pop_front (T& data)
{
cell_t* cell;
#ifdef MPMC_USE_STD_ATOMIC
size_t pos = _dequeue_pos.load (std::memory_order_relaxed);
#else
guint pos = _dequeue_pos.load ();
#endif
for (;;) {
cell = &_buffer[pos & _buffer_mask];
#ifdef MPMC_USE_STD_ATOMIC
size_t seq = cell->_sequence.load (std::memory_order_acquire);
#else
guint seq = g_atomic_int_get (&cell->_sequence);
#endif
intptr_t dif = (intptr_t)seq - (intptr_t) (pos + 1);
if (dif == 0) {
#ifdef MPMC_USE_STD_ATOMIC
if (_dequeue_pos.compare_exchange_weak (pos, pos + 1, std::memory_order_relaxed))
#else
if (g_atomic_int_compare_and_exchange (&_dequeue_pos, pos, pos + 1))
#endif
{
break;
}
} else if (dif < 0) {
return false;
} else {
#ifdef MPMC_USE_STD_ATOMIC
pos = _dequeue_pos.load (std::memory_order_relaxed);
#else
pos = _dequeue_pos.load ();
#endif
}
}
data = cell->_data;
#ifdef MPMC_USE_STD_ATOMIC
cell->_sequence.store (pos + _buffer_mask + 1, std::memory_order_release);
#else
g_atomic_int_set (&cell->_sequence, pos + _buffer_mask + 1);
#endif
return true;
}
@@ -214,7 +159,6 @@ private:
} // namespace PBD
#undef MPMC_USE_STD_ATOMIC
#undef MPMC_QUEUE_TYPE
#endif
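
With the glib fallback removed, the queue is a bounded MPMC design (Vyukov-style) built on std::atomic<size_t> sequence numbers: a cell whose _sequence equals the position belongs to the next producer, one whose _sequence equals position + 1 belongs to the next consumer, and the compare_exchange_weak on the position claims the slot. A hedged usage sketch; the default constructor and reserve() call here are assumptions, verify against pbd/mpmc_queue.h:

#include "pbd/mpmc_queue.h"

void mpmc_example ()
{
	PBD::MPMCQueue<int> q;
	q.reserve (8); // capacity rounds up to a power of two

	q.push_back (42); // returns false when the queue is full

	int v;
	if (q.pop_front (v)) { // returns false when the queue is empty
		/* v == 42 */
	}
}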

View file

@@ -35,13 +35,13 @@ template<class T>
class /*LIBPBD_API*/ PlaybackBuffer
{
public:
static guint power_of_two_size (guint sz) {
static size_t power_of_two_size (size_t sz) {
int32_t power_of_two;
for (power_of_two = 1; 1U << power_of_two < sz; ++power_of_two);
return 1U << power_of_two;
}
PlaybackBuffer (guint sz, guint res = 8191)
PlaybackBuffer (size_t sz, size_t res = 8191)
: reservation (res)
{
sz += reservation;
@@ -60,7 +60,7 @@ public:
/* init (mlock) */
T *buffer () { return buf; }
/* init (mlock) */
guint bufsize () const { return size; }
size_t bufsize () const { return size; }
/* write-thread */
void reset () {
@@ -75,20 +75,20 @@ public:
/* called from rt (reader) thread for new buffers */
void align_to (PlaybackBuffer const& other) {
Glib::Threads::Mutex::Lock lm (_reset_lock);
g_atomic_int_set (&read_idx, g_atomic_int_get (&other.read_idx));
g_atomic_int_set (&write_idx, g_atomic_int_get (&other.write_idx));
g_atomic_int_set (&reserved, g_atomic_int_get (&other.reserved));
read_idx.store (other.read_idx.load());
write_idx.store (other.write_idx.load());
reserved.store (other.reserved.load());
memset (buf, 0, size * sizeof (T));
}
/* write-thread */
guint write_space () const {
guint w, r;
size_t write_space () const {
size_t w, r;
w = write_idx.load ();
r = read_idx.load ();
guint rv;
size_t rv;
if (w > r) {
rv = ((r + size) - w) & size_mask;
@@ -111,8 +111,8 @@
}
/* read-thread */
guint read_space () const {
guint w, r;
size_t read_space () const {
size_t w, r;
w = write_idx.load ();
r = read_idx.load ();
@@ -125,8 +125,8 @@
}
/* write thread */
guint overwritable_at (guint r) const {
guint w;
size_t overwritable_at (size_t r) const {
size_t w;
w = write_idx.load ();
@@ -137,26 +137,26 @@
}
/* read-thread */
guint read (T *dest, guint cnt, bool commit = true, guint offset = 0);
size_t read (T *dest, size_t cnt, bool commit = true, size_t offset = 0);
/* write-thread */
guint write (T const * src, guint cnt);
size_t write (T const * src, size_t cnt);
/* write-thread */
guint write_zero (guint cnt);
size_t write_zero (size_t cnt);
/* read-thread */
guint increment_write_ptr (guint cnt)
size_t increment_write_ptr (size_t cnt)
{
cnt = std::min (cnt, write_space ());
g_atomic_int_set (&write_idx, (write_idx.load () + cnt) & size_mask);
write_idx.store ((write_idx.load () + cnt) & size_mask);
return cnt;
}
/* read-thread */
guint decrement_read_ptr (guint cnt)
size_t decrement_read_ptr (size_t cnt)
{
SpinLock sl (_reservation_lock);
guint r = read_idx.load ();
guint res = reserved.load ();
size_t r = read_idx.load ();
size_t res = reserved.load ();
cnt = std::min (cnt, res);
@@ -164,19 +164,19 @@
res -= cnt;
read_idx.store (r);
g_atomic_int_set (&reserved, res);
reserved.store (res);
return cnt;
}
/* read-thread */
guint increment_read_ptr (guint cnt)
size_t increment_read_ptr (size_t cnt)
{
cnt = std::min (cnt, read_space ());
SpinLock sl (_reservation_lock);
g_atomic_int_set (&read_idx, (read_idx.load () + cnt) & size_mask);
g_atomic_int_set (&reserved, std::min (reservation, reserved.load () + cnt));
read_idx.store ((read_idx.load () + cnt) & size_mask);
reserved.store (std::min (reservation, reserved.load () + cnt));
return cnt;
}
@@ -184,28 +184,28 @@
/* read-thread */
bool can_seek (int64_t cnt) {
if (cnt > 0) {
return read_space() >= cnt;
return read_space() >= (size_t) cnt;
} else if (cnt < 0) {
return reserved.load () >= -cnt;
return reserved.load () >= (size_t) -cnt;
} else {
return true;
}
}
guint read_ptr() const { return read_idx.load (); }
guint write_ptr() const { return write_idx.load (); }
guint reserved_size() const { return reserved.load (); }
guint reservation_size() const { return reservation; }
size_t read_ptr() const { return read_idx.load (); }
size_t write_ptr() const { return write_idx.load (); }
size_t reserved_size() const { return reserved.load (); }
size_t reservation_size() const { return reservation; }
private:
T *buf;
const guint reservation;
guint size;
guint size_mask;
const size_t reservation;
size_t size;
size_t size_mask;
mutable std::atomic<int> write_idx;
mutable std::atomic<int> read_idx;
mutable std::atomic<int> reserved;
mutable std::atomic<size_t> write_idx;
mutable std::atomic<size_t> read_idx;
mutable std::atomic<size_t> reserved;
/* spinlock will be used to update write_idx and reserved in sync */
spinlock_t _reservation_lock;
@@ -213,20 +213,20 @@ private:
Glib::Threads::Mutex _reset_lock;
};
template<class T> /*LIBPBD_API*/ guint
PlaybackBuffer<T>::write (T const *src, guint cnt)
template<class T> /*LIBPBD_API*/ size_t
PlaybackBuffer<T>::write (T const *src, size_t cnt)
{
guint w = write_idx.load ();
const guint free_cnt = write_space ();
size_t w = write_idx.load ();
const size_t free_cnt = write_space ();
if (free_cnt == 0) {
return 0;
}
const guint to_write = cnt > free_cnt ? free_cnt : cnt;
const guint cnt2 = w + to_write;
const size_t to_write = cnt > free_cnt ? free_cnt : cnt;
const size_t cnt2 = w + to_write;
guint n1, n2;
size_t n1, n2;
if (cnt2 > size) {
n1 = size - w;
n2 = cnt2 & size_mask;
@@ -247,20 +247,20 @@ PlaybackBuffer<T>::write (T const *src, guint cnt)
return to_write;
}
template<class T> /*LIBPBD_API*/ guint
PlaybackBuffer<T>::write_zero (guint cnt)
template<class T> /*LIBPBD_API*/ size_t
PlaybackBuffer<T>::write_zero (size_t cnt)
{
guint w = write_idx.load ();
const guint free_cnt = write_space ();
size_t w = write_idx.load ();
const size_t free_cnt = write_space ();
if (free_cnt == 0) {
return 0;
}
const guint to_write = cnt > free_cnt ? free_cnt : cnt;
const guint cnt2 = w + to_write;
const size_t to_write = cnt > free_cnt ? free_cnt : cnt;
const size_t cnt2 = w + to_write;
guint n1, n2;
size_t n1, n2;
if (cnt2 > size) {
n1 = size - w;
n2 = cnt2 & size_mask;
@@ -281,8 +281,8 @@ PlaybackBuffer<T>::write_zero (guint cnt)
return to_write;
}
template<class T> /*LIBPBD_API*/ guint
PlaybackBuffer<T>::read (T *dest, guint cnt, bool commit, guint offset)
template<class T> /*LIBPBD_API*/ size_t
PlaybackBuffer<T>::read (T *dest, size_t cnt, bool commit, size_t offset)
{
Glib::Threads::Mutex::Lock lm (_reset_lock, Glib::Threads::TRY_LOCK);
if (!lm.locked ()) {
@@ -290,10 +290,10 @@ PlaybackBuffer<T>::read (T *dest, guint cnt, bool commit, guint offset)
return 0;
}
guint r = read_idx.load ();
const guint w = write_idx.load ();
size_t r = read_idx.load ();
const size_t w = write_idx.load ();
guint free_cnt = (w > r) ? (w - r) : ((w - r + size) & size_mask);
size_t free_cnt = (w > r) ? (w - r) : ((w - r + size) & size_mask);
if (!commit && offset > 0) {
if (offset > free_cnt) {
@@ -303,11 +303,11 @@ PlaybackBuffer<T>::read (T *dest, guint cnt, bool commit, guint offset)
r = (r + offset) & size_mask;
}
const guint to_read = cnt > free_cnt ? free_cnt : cnt;
const size_t to_read = cnt > free_cnt ? free_cnt : cnt;
const guint cnt2 = r + to_read;
const size_t cnt2 = r + to_read;
guint n1, n2;
size_t n1, n2;
if (cnt2 > size) {
n1 = size - r;
n2 = cnt2 & size_mask;
@@ -327,7 +327,7 @@ PlaybackBuffer<T>::read (T *dest, guint cnt, bool commit, guint offset)
if (commit) {
SpinLock sl (_reservation_lock);
read_idx.store (r);
g_atomic_int_set (&reserved, std::min (reservation, reserved.load () + to_read));
reserved.store (std::min (reservation, reserved.load () + to_read));
}
return to_read;
}
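
The index discipline above: each index has exactly one writer (read_idx advanced by the reader, write_idx by the writer), so a plain load/modify/store on the std::atomic is race-free, and the mask keeps the index inside the power-of-two buffer. Minimal sketch:

#include <atomic>
#include <cstddef>

static const size_t size_mask = 1023; // buffer size 1024, a power of two
static std::atomic<size_t> write_idx (0);

static void advance_write_idx (size_t cnt) // called only from the writer thread
{
	write_idx.store ((write_idx.load () + cnt) & size_mask);
}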

View file

@@ -72,15 +72,15 @@ class /*LIBPBD_API*/ RingBufferNPT
void get_write_vector (rw_vector *);
void decrement_read_ptr (size_t cnt) {
g_atomic_int_set (&read_ptr, (read_ptr.load () - cnt) % size);
read_ptr.store ((read_ptr.load () - cnt) % size);
}
void increment_read_ptr (size_t cnt) {
g_atomic_int_set (&read_ptr, (read_ptr.load () + cnt) % size);
read_ptr.store ((read_ptr.load () + cnt) % size);
}
void increment_write_ptr (size_t cnt) {
g_atomic_int_set (&write_ptr, (write_ptr.load () + cnt) % size);
write_ptr.store ((write_ptr.load () + cnt) % size);
}
size_t write_space () {
@@ -161,7 +161,7 @@ RingBufferNPT<T>::read (T *dest, size_t cnt)
priv_read_ptr = n2;
}
g_atomic_int_set (&read_ptr, priv_read_ptr);
read_ptr.store (priv_read_ptr);
return to_read;
}
@@ -200,7 +200,7 @@ RingBufferNPT<T>::write (const T *src, size_t cnt)
priv_write_ptr = n2;
}
g_atomic_int_set (&write_ptr, priv_write_ptr);
write_ptr.store (priv_write_ptr);
return to_write;
}

View file

@@ -105,7 +105,7 @@ class LIBPBD_API Stateful {
virtual void suspend_property_changes ();
virtual void resume_property_changes ();
bool property_changes_suspended() const { return g_atomic_int_get (const_cast<std::atomic<int>*> (&_stateful_frozen)) > 0; }
bool property_changes_suspended() const { return _stateful_frozen.load() > 0; }
protected: