Introduce a new type for session-relative frame time, and make the source interface capable of handling 64-bit long sessions.

sframes_t is "session frames".  The rules for time stamps are:
 - Anything relative to transport time, session position, etc., should be sframes_t
 - Anything relative to jack cycles, including the length thereof, should be nframes_t

To support sessions which exceed UINT32_MAX frames, we need to replace all the uses of
nframes_t for session time with sframes_t, and make sure the conversions are sound.
Session time does not depend on jack's nframes_t; that we are currently using the same type for both was an oops.

This is also kinda nice for readability, since the two different time bases now have different types...
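
As a quick illustration of the rules above (a sketch only: the typedefs mirror what ardour/types.h now declares, but offset_in_cycle is a hypothetical helper, not part of this commit):

    #include <stdint.h>
    #include <cassert>

    typedef uint32_t nframes_t;  /* JACK-cycle-relative counts and lengths */
    typedef int64_t  sframes_t;  /* session-timeline positions */

    /* An event at session position event_pos that falls inside the current cycle
     * [cycle_start, cycle_start + nframes) is less than one cycle away from
     * cycle_start, so narrowing the difference back to nframes_t is sound. */
    static inline nframes_t
    offset_in_cycle (sframes_t event_pos, sframes_t cycle_start, nframes_t nframes)
    {
            assert (event_pos >= cycle_start);
            assert (event_pos < cycle_start + (sframes_t) nframes);
            return (nframes_t) (event_pos - cycle_start);
    }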


git-svn-id: svn://localhost/ardour2/branches/3.0@4636 d708f5d6-7413-0410-9779-e7cbd77b26cf
David Robillard 2009-02-19 05:45:11 +00:00
parent 0f71728a92
commit 8a28ea6154
18 changed files with 101 additions and 92 deletions

View file

@ -58,13 +58,13 @@ public:
/* this block of methods do nothing for regular file sources, but are significant
for files used in destructive recording.
*/
virtual nframes_t last_capture_start_frame() const { return 0; }
virtual void mark_capture_start (nframes_t) {}
virtual sframes_t last_capture_start_frame() const { return 0; }
virtual void mark_capture_start (sframes_t) {}
virtual void mark_capture_end () {}
virtual void clear_capture_marks() {}
virtual bool one_of_several_channels () const { return false; }
virtual int update_header (nframes_t when, struct tm&, time_t) = 0;
virtual int update_header (sframes_t when, struct tm&, time_t) = 0;
virtual int flush_header () = 0;
void mark_streaming_write_completed ();

View file

@ -65,7 +65,7 @@ class AudioSource : virtual public Source,
return read (dst, (nframes_t) start, (nframes_t) cnt);
}
virtual nframes_t read (Sample *dst, nframes_t start, nframes_t cnt) const;
virtual nframes_t read (Sample *dst, sframes_t start, nframes_t cnt) const;
virtual nframes_t write (Sample *src, nframes_t cnt);
virtual float sample_rate () const = 0;
@ -81,7 +81,7 @@ class AudioSource : virtual public Source,
uint32_t write_data_count() const { return _write_data_count; }
int read_peaks (PeakData *peaks, nframes_t npeaks,
nframes_t start, nframes_t cnt, double samples_per_visual_peak) const;
sframes_t start, nframes_t cnt, double samples_per_visual_peak) const;
int build_peaks ();
bool peaks_ready (sigc::slot<void>, sigc::connection&) const;
@ -126,23 +126,23 @@ class AudioSource : virtual public Source,
int initialize_peakfile (bool newfile, Glib::ustring path);
int build_peaks_from_scratch ();
int compute_and_write_peaks (Sample* buf, nframes_t first_frame, nframes_t cnt,
int compute_and_write_peaks (Sample* buf, sframes_t first_frame, nframes_t cnt,
bool force, bool intermediate_peaks_ready_signal);
void truncate_peakfile();
mutable off_t _peak_byte_max; // modified in compute_and_write_peak()
virtual nframes_t read_unlocked (Sample *dst, nframes_t start, nframes_t cnt) const = 0;
virtual nframes_t read_unlocked (Sample *dst, sframes_t start, nframes_t cnt) const = 0;
virtual nframes_t write_unlocked (Sample *dst, nframes_t cnt) = 0;
virtual Glib::ustring peak_path(Glib::ustring audio_path) = 0;
virtual Glib::ustring find_broken_peakfile (Glib::ustring missing_peak_path,
Glib::ustring audio_path) = 0;
virtual int read_peaks_with_fpp (PeakData *peaks,
nframes_t npeaks, nframes_t start, nframes_t cnt,
nframes_t npeaks, sframes_t start, nframes_t cnt,
double samples_per_visual_peak, nframes_t fpp) const;
int compute_and_write_peaks (Sample* buf, nframes_t first_frame, nframes_t cnt,
int compute_and_write_peaks (Sample* buf, sframes_t first_frame, nframes_t cnt,
bool force, bool intermediate_peaks_ready_signal, nframes_t frames_per_peak);
private:

View file

@ -29,22 +29,22 @@ namespace ARDOUR {
class Session;
class BeatsFramesConverter : public Evoral::TimeConverter<double,nframes_t> {
class BeatsFramesConverter : public Evoral::TimeConverter<double,sframes_t> {
public:
BeatsFramesConverter(Session& session, nframes_t origin)
BeatsFramesConverter(Session& session, sframes_t origin)
: _session(session)
, _origin(origin)
{}
nframes_t to(double beats) const;
double from(nframes_t frames) const;
sframes_t to(double beats) const;
double from(sframes_t frames) const;
nframes_t origin() const { return _origin; }
void set_origin(nframes_t origin) { _origin = origin; }
sframes_t origin() const { return _origin; }
void set_origin(sframes_t origin) { _origin = origin; }
private:
Session& _session;
nframes_t _origin;
sframes_t _origin;
};
} /* namespace ARDOUR */

View file

@ -57,20 +57,20 @@ class MidiSource : virtual public Source
* \param negative_stamp_offset Offset to subtract from event times written to dst
*/
virtual nframes_t midi_read (MidiRingBuffer<nframes_t>& dst,
nframes_t position,
nframes_t start, nframes_t cnt,
nframes_t stamp_offset, nframes_t negative_stamp_offset) const;
sframes_t position,
sframes_t start, nframes_t cnt,
sframes_t stamp_offset, sframes_t negative_stamp_offset) const;
virtual nframes_t midi_write (MidiRingBuffer<nframes_t>& src,
nframes_t position,
sframes_t position,
nframes_t cnt);
virtual void append_event_unlocked_beats(const Evoral::Event<double>& ev) = 0;
virtual void append_event_unlocked_frames(const Evoral::Event<nframes_t>& ev,
nframes_t position) = 0;
sframes_t position) = 0;
virtual void mark_streaming_midi_write_started (NoteMode mode, nframes_t start_time);
virtual void mark_streaming_midi_write_started (NoteMode mode, sframes_t start_time);
virtual void mark_streaming_write_started ();
virtual void mark_streaming_write_completed ();
@ -85,7 +85,7 @@ class MidiSource : virtual public Source
static sigc::signal<void,MidiSource*> MidiSourceCreated;
// Signal a range of recorded data is available for reading from model()
mutable sigc::signal<void,nframes_t,nframes_t> ViewDataRangeReady;
mutable sigc::signal<void,sframes_t,nframes_t> ViewDataRangeReady;
XMLNode& get_state ();
int set_state (const XMLNode&);
@ -110,12 +110,12 @@ class MidiSource : virtual public Source
virtual void flush_midi() = 0;
virtual nframes_t read_unlocked (MidiRingBuffer<nframes_t>& dst,
nframes_t position,
nframes_t start, nframes_t cnt,
nframes_t stamp_offset, nframes_t negative_stamp_offset) const = 0;
sframes_t position,
sframes_t start, nframes_t cnt,
sframes_t stamp_offset, sframes_t negative_stamp_offset) const = 0;
virtual nframes_t write_unlocked (MidiRingBuffer<nframes_t>& dst,
nframes_t position,
sframes_t position,
nframes_t cnt) = 0;
std::string _captured_for;
@ -126,7 +126,7 @@ class MidiSource : virtual public Source
bool _writing;
mutable Evoral::Sequence<double>::const_iterator _model_iter;
mutable nframes_t _last_read_end;
mutable sframes_t _last_read_end;
private:
bool file_changed (std::string path);

View file

@ -27,7 +27,7 @@ namespace ARDOUR {
class SilentFileSource : public AudioFileSource {
public:
int update_header (nframes_t when, struct tm&, time_t) { return 0; }
int update_header (sframes_t when, struct tm&, time_t) { return 0; }
int flush_header () { return 0; }
float sample_rate () const { return _sample_rate; }
@ -47,7 +47,7 @@ protected:
_length = len;
}
nframes_t read_unlocked (Sample *dst, nframes_t start, nframes_t cnt) const {
nframes_t read_unlocked (Sample *dst, sframes_t start, nframes_t cnt) const {
memset (dst, 0, sizeof (Sample) * cnt);
return cnt;
}
@ -56,7 +56,7 @@ protected:
void set_header_timeline_position () {}
int read_peaks_with_fpp (PeakData *peaks, nframes_t npeaks, nframes_t start, nframes_t cnt,
int read_peaks_with_fpp (PeakData *peaks, nframes_t npeaks, sframes_t start, nframes_t cnt,
double samples_per_unit, nframes_t fpp) const {
memset (peaks, 0, sizeof (PeakData) * npeaks);
return 0;

View file

@ -52,9 +52,9 @@ public:
bool set_name (const std::string& newname) { return (set_source_name(newname, false) == 0); }
void append_event_unlocked_beats (const Evoral::Event<double>& ev);
void append_event_unlocked_frames (const Evoral::Event<nframes_t>& ev, nframes_t position);
void append_event_unlocked_frames (const Evoral::Event<nframes_t>& ev, sframes_t position);
void mark_streaming_midi_write_started (NoteMode mode, nframes_t start_time);
void mark_streaming_midi_write_started (NoteMode mode, sframes_t start_time);
void mark_streaming_write_completed ();
XMLNode& get_state ();
@ -71,20 +71,20 @@ public:
private:
nframes_t read_unlocked (MidiRingBuffer<nframes_t>& dst,
nframes_t position,
nframes_t start,
sframes_t position,
sframes_t start,
nframes_t cnt,
nframes_t stamp_offset,
nframes_t negative_stamp_offset) const;
sframes_t stamp_offset,
sframes_t negative_stamp_offset) const;
nframes_t write_unlocked (MidiRingBuffer<nframes_t>& src,
nframes_t position,
sframes_t position,
nframes_t cnt);
void set_default_controls_interpolation ();
double _last_ev_time_beats;
nframes_t _last_ev_time_frames;
sframes_t _last_ev_time_frames;
};
}; /* namespace ARDOUR */

View file

@ -43,13 +43,13 @@ class SndFileSource : public AudioFileSource {
~SndFileSource ();
float sample_rate () const;
int update_header (nframes_t when, struct tm&, time_t);
int update_header (sframes_t when, struct tm&, time_t);
int flush_header ();
nframes_t natural_position () const;
sframes_t natural_position () const;
nframes_t last_capture_start_frame() const;
void mark_capture_start (nframes_t);
sframes_t last_capture_start_frame() const;
void mark_capture_start (sframes_t);
void mark_capture_end ();
void clear_capture_marks();
@ -65,10 +65,10 @@ class SndFileSource : public AudioFileSource {
protected:
void set_header_timeline_position ();
nframes_t read_unlocked (Sample *dst, nframes_t start, nframes_t cnt) const;
nframes_t read_unlocked (Sample *dst, sframes_t start, nframes_t cnt) const;
nframes_t write_unlocked (Sample *dst, nframes_t cnt);
nframes_t write_float (Sample* data, nframes_t pos, nframes_t cnt);
nframes_t write_float (Sample* data, sframes_t pos, nframes_t cnt);
private:
SNDFILE *sf;
@ -77,7 +77,7 @@ class SndFileSource : public AudioFileSource {
void init_sndfile ();
int open();
int setup_broadcast_info (nframes_t when, struct tm&, time_t);
int setup_broadcast_info (sframes_t when, struct tm&, time_t);
/* destructive */
@ -87,8 +87,8 @@ class SndFileSource : public AudioFileSource {
bool _capture_start;
bool _capture_end;
nframes_t capture_start_frame;
nframes_t file_pos; // unit is frames
sframes_t capture_start_frame;
sframes_t file_pos; // unit is frames
nframes_t xfade_out_count;
nframes_t xfade_in_count;
Sample* xfade_buf;

View file

@ -61,11 +61,11 @@ class Source : public SessionObject, public boost::noncopyable
time_t timestamp() const { return _timestamp; }
void stamp (time_t when) { _timestamp = when; }
nframes_t length() const { return _length; }
sframes_t length() const { return _length; }
virtual const Glib::ustring& path() const = 0;
virtual nframes_t natural_position() const { return 0; }
virtual sframes_t natural_position() const { return 0; }
void mark_for_remove();
@ -104,7 +104,7 @@ class Source : public SessionObject, public boost::noncopyable
std::string get_transients_path() const;
int load_transients (const std::string&);
void update_length (nframes_t pos, nframes_t cnt);
void update_length (sframes_t pos, sframes_t cnt);
int64_t timeline_position() const { return _timeline_position; }
virtual void set_timeline_position (int64_t pos);
@ -118,7 +118,7 @@ class Source : public SessionObject, public boost::noncopyable
DataType _type;
Flag _flags;
time_t _timestamp;
nframes_t _length;
sframes_t _length;
int64_t _timeline_position;
bool _analysed;
mutable Glib::Mutex _lock;

View file

@ -60,6 +60,13 @@ namespace ARDOUR {
typedef uint64_t microseconds_t;
typedef uint32_t nframes_t;
/** "Session frames", frames relative to the session timeline.
* Everything related to transport position etc. should be of this type.
* We might want to make this a compile time option for 32-bitters who
* don't want to pay for extremely long session times they don't need...
*/
typedef int64_t sframes_t;
enum IOChange {
NoChange = 0,
ConfigurationChanged = 0x1,
@ -75,7 +82,7 @@ namespace ARDOUR {
};
OverlapType coverage (nframes_t start_a, nframes_t end_a,
nframes_t start_b, nframes_t end_b);
nframes_t start_b, nframes_t end_b);
/** See parameter.h
* XXX: I don't think/hope these hex values matter anymore.

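For a sense of scale (back-of-the-envelope arithmetic, not from the source): a uint32_t frame counter wraps after UINT32_MAX frames, which is roughly 24.9 hours at 48 kHz or 12.4 hours at 96 kHz, so long sessions really can exceed it; a signed 64-bit counter is effectively unbounded for audio purposes. A standalone check:

    #include <stdio.h>

    int main ()
    {
            const double max_u32 = 4294967295.0;  /* UINT32_MAX */
            const double rate    = 96000.0;       /* assumed sample rate */
            printf ("a uint32_t frame count wraps after %.1f hours at %.0f Hz\n",
                    max_u32 / rate / 3600.0, rate);
            return 0;
    }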
View file

@ -247,7 +247,7 @@ AudioSource::initialize_peakfile (bool newfile, ustring audio_path)
}
nframes_t
AudioSource::read (Sample *dst, nframes_t start, nframes_t cnt) const
AudioSource::read (Sample *dst, sframes_t start, nframes_t cnt) const
{
Glib::Mutex::Lock lm (_lock);
return read_unlocked (dst, start, cnt);
@ -261,13 +261,13 @@ AudioSource::write (Sample *dst, nframes_t cnt)
}
int
AudioSource::read_peaks (PeakData *peaks, nframes_t npeaks, nframes_t start, nframes_t cnt, double samples_per_visual_peak) const
AudioSource::read_peaks (PeakData *peaks, nframes_t npeaks, sframes_t start, nframes_t cnt, double samples_per_visual_peak) const
{
return read_peaks_with_fpp (peaks, npeaks, start, cnt, samples_per_visual_peak, _FPP);
}
int
AudioSource::read_peaks_with_fpp (PeakData *peaks, nframes_t npeaks, nframes_t start, nframes_t cnt,
AudioSource::read_peaks_with_fpp (PeakData *peaks, nframes_t npeaks, sframes_t start, nframes_t cnt,
double samples_per_visual_peak, nframes_t samples_per_file_peak) const
{
Glib::Mutex::Lock lm (_lock);
@ -426,7 +426,7 @@ AudioSource::read_peaks_with_fpp (PeakData *peaks, nframes_t npeaks, nframes_t s
if (i == stored_peaks_read) {
uint32_t start_byte = current_stored_peak * sizeof(PeakData);
tnp = min ((_length/samples_per_file_peak - current_stored_peak), (nframes_t) expected_peaks);
tnp = min ((nframes_t)(_length/samples_per_file_peak - current_stored_peak), (nframes_t) expected_peaks);
to_read = min (chunksize, tnp);
#ifdef DEBUG_READ_PEAKS
@ -520,7 +520,7 @@ AudioSource::read_peaks_with_fpp (PeakData *peaks, nframes_t npeaks, nframes_t s
if (i == frames_read) {
to_read = min (chunksize, (_length - current_frame));
to_read = min (chunksize, nframes_t(_length - current_frame));
if (to_read == 0) {
/* XXX ARGH .. out by one error ... need to figure out why this happens
@ -681,14 +681,15 @@ AudioSource::done_with_peakfile_writes (bool done)
}
int
AudioSource::compute_and_write_peaks (Sample* buf, nframes_t first_frame, nframes_t cnt, bool force, bool intermediate_peaks_ready)
AudioSource::compute_and_write_peaks (Sample* buf, sframes_t first_frame, nframes_t cnt,
bool force, bool intermediate_peaks_ready)
{
return compute_and_write_peaks (buf, first_frame, cnt, force, intermediate_peaks_ready, _FPP);
}
int
AudioSource::compute_and_write_peaks (Sample* buf, nframes_t first_frame, nframes_t cnt, bool force,
bool intermediate_peaks_ready, nframes_t fpp)
AudioSource::compute_and_write_peaks (Sample* buf, sframes_t first_frame, nframes_t cnt,
bool force, bool intermediate_peaks_ready, nframes_t fpp)
{
Sample* buf2 = 0;
nframes_t to_do;

View file

@ -26,7 +26,7 @@
namespace ARDOUR {
nframes_t
sframes_t
BeatsFramesConverter::to(double beats) const
{
// FIXME: assumes tempo never changes after origin
@ -39,7 +39,7 @@ BeatsFramesConverter::to(double beats) const
}
double
BeatsFramesConverter::from(nframes_t frames) const
BeatsFramesConverter::from(sframes_t frames) const
{
// FIXME: assumes tempo never changes after origin
const Tempo& tempo = _session.tempo_map().tempo_at(_origin);

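A possible usage sketch for the converter interface above (the include path and the caller that supplies the Session reference are assumptions; only the constructor and the to()/from() signatures come from this diff):

    #include "ardour/beats_frames_converter.h"  /* assumed header location */

    /* Round-trip a beat time through a converter anchored at a region's session
     * position: from (to (beats)) should return beats again (modulo rounding),
     * regardless of how the origin is applied internally. */
    double
    roundtrip_beats (ARDOUR::Session& session, ARDOUR::sframes_t region_position, double beats)
    {
            ARDOUR::BeatsFramesConverter converter (session, region_position);
            ARDOUR::sframes_t frames = converter.to (beats);
            return converter.from (frames);
    }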
View file

@ -108,9 +108,9 @@ MidiSource::invalidate ()
}
nframes_t
MidiSource::midi_read (MidiRingBuffer<nframes_t>& dst, nframes_t position,
nframes_t start, nframes_t cnt,
nframes_t stamp_offset, nframes_t negative_stamp_offset) const
MidiSource::midi_read (MidiRingBuffer<nframes_t>& dst, sframes_t position,
sframes_t start, nframes_t cnt,
sframes_t stamp_offset, sframes_t negative_stamp_offset) const
{
Glib::Mutex::Lock lm (_lock);
@ -147,7 +147,7 @@ MidiSource::midi_read (MidiRingBuffer<nframes_t>& dst, nframes_t position,
}
nframes_t
MidiSource::midi_write (MidiRingBuffer<nframes_t>& dst, nframes_t position, nframes_t cnt)
MidiSource::midi_write (MidiRingBuffer<nframes_t>& dst, sframes_t position, nframes_t cnt)
{
Glib::Mutex::Lock lm (_lock);
return write_unlocked (dst, position, cnt);
@ -164,7 +164,7 @@ MidiSource::file_changed (string path)
}
void
MidiSource::mark_streaming_midi_write_started (NoteMode mode, nframes_t start_frame)
MidiSource::mark_streaming_midi_write_started (NoteMode mode, sframes_t start_frame)
{
set_timeline_position(start_frame);

View file

@ -1544,7 +1544,7 @@ Region::verify_length (nframes_t len)
nframes_t maxlen = 0;
for (uint32_t n=0; n < _sources.size(); ++n) {
maxlen = max (maxlen, _sources[n]->length() - _start);
maxlen = max (maxlen, (nframes_t)_sources[n]->length() - _start);
}
len = min (len, maxlen);
@ -1562,7 +1562,7 @@ Region::verify_start_and_length (nframes_t new_start, nframes_t& new_length)
nframes_t maxlen = 0;
for (uint32_t n=0; n < _sources.size(); ++n) {
maxlen = max (maxlen, _sources[n]->length() - new_start);
maxlen = max (maxlen, (nframes_t)_sources[n]->length() - new_start);
}
new_length = min (new_length, maxlen);

View file

@ -95,9 +95,9 @@ SMFSource::~SMFSource ()
/** All stamps in audio frames */
nframes_t
SMFSource::read_unlocked (MidiRingBuffer<nframes_t>& dst, nframes_t position,
nframes_t start, nframes_t dur,
nframes_t stamp_offset, nframes_t negative_stamp_offset) const
SMFSource::read_unlocked (MidiRingBuffer<nframes_t>& dst, sframes_t position,
sframes_t start, nframes_t dur,
sframes_t stamp_offset, sframes_t negative_stamp_offset) const
{
int ret = 0;
uint64_t time = 0; // in SMF ticks, 1 tick per _ppqn
@ -146,7 +146,7 @@ SMFSource::read_unlocked (MidiRingBuffer<nframes_t>& dst, nframes_t position,
ev_type = EventTypeMap::instance().midi_event_type(ev_buffer[0]);
assert(time >= start_ticks);
const nframes_t ev_frame_time = converter.to(time / (double)ppqn()) + stamp_offset;
const sframes_t ev_frame_time = converter.to(time / (double)ppqn()) + stamp_offset;
if (ev_frame_time < start + dur) {
dst.write(ev_frame_time - negative_stamp_offset, ev_type, ev_size, ev_buffer);
@ -167,7 +167,7 @@ SMFSource::read_unlocked (MidiRingBuffer<nframes_t>& dst, nframes_t position,
/** All stamps in audio frames */
nframes_t
SMFSource::write_unlocked (MidiRingBuffer<nframes_t>& src, nframes_t position, nframes_t dur)
SMFSource::write_unlocked (MidiRingBuffer<nframes_t>& src, sframes_t position, nframes_t dur)
{
_write_data_count = 0;
@ -227,7 +227,7 @@ SMFSource::write_unlocked (MidiRingBuffer<nframes_t>& src, nframes_t position, n
Evoral::SMF::flush();
free(buf);
const nframes_t oldlen = _length;
const sframes_t oldlen = _length;
update_length(oldlen, dur);
ViewDataRangeReady(position + oldlen, dur); /* EMIT SIGNAL */
@ -269,7 +269,7 @@ SMFSource::append_event_unlocked_beats (const Evoral::Event<double>& ev)
/** Append an event with a timestamp in frames (nframes_t) */
void
SMFSource::append_event_unlocked_frames (const Evoral::Event<nframes_t>& ev, nframes_t position)
SMFSource::append_event_unlocked_frames (const Evoral::Event<nframes_t>& ev, sframes_t position)
{
if (ev.size() == 0) {
return;
@ -287,7 +287,7 @@ SMFSource::append_event_unlocked_frames (const Evoral::Event<nframes_t>& ev, nfr
BeatsFramesConverter converter(_session, position);
const nframes_t delta_time_frames = ev.time() - _last_ev_time_frames;
const sframes_t delta_time_frames = ev.time() - _last_ev_time_frames;
const double delta_time_beats = converter.from(delta_time_frames);
const uint32_t delta_time_ticks = (uint32_t)(lrint(delta_time_beats * (double)ppqn()));
@ -329,7 +329,7 @@ SMFSource::set_state (const XMLNode& node)
}
void
SMFSource::mark_streaming_midi_write_started (NoteMode mode, nframes_t start_frame)
SMFSource::mark_streaming_midi_write_started (NoteMode mode, sframes_t start_frame)
{
MidiSource::mark_streaming_midi_write_started (mode, start_frame);
Evoral::SMF::begin_write ();

View file

@ -274,7 +274,7 @@ SndFileSource::sample_rate () const
}
nframes_t
SndFileSource::read_unlocked (Sample *dst, nframes_t start, nframes_t cnt) const
SndFileSource::read_unlocked (Sample *dst, sframes_t start, nframes_t cnt) const
{
int32_t nread;
float *ptr;
@ -482,7 +482,7 @@ SndFileSource::destructive_write_unlocked (Sample* data, nframes_t cnt)
}
int
SndFileSource::update_header (nframes_t when, struct tm& now, time_t tnow)
SndFileSource::update_header (sframes_t when, struct tm& now, time_t tnow)
{
set_timeline_position (when);
@ -506,7 +506,7 @@ SndFileSource::flush_header ()
}
int
SndFileSource::setup_broadcast_info (nframes_t when, struct tm& now, time_t tnow)
SndFileSource::setup_broadcast_info (sframes_t when, struct tm& now, time_t tnow)
{
if (!writable()) {
warning << string_compose (_("attempt to store broadcast info in a non-writable audio file source (%1)"), _path) << endmsg;
@ -556,7 +556,7 @@ SndFileSource::set_header_timeline_position ()
}
nframes_t
SndFileSource::write_float (Sample* data, nframes_t frame_pos, nframes_t cnt)
SndFileSource::write_float (Sample* data, sframes_t frame_pos, nframes_t cnt)
{
if (sf_seek (sf, frame_pos, SEEK_SET|SFM_WRITE) < 0) {
char errbuf[256];
@ -572,7 +572,7 @@ SndFileSource::write_float (Sample* data, nframes_t frame_pos, nframes_t cnt)
return cnt;
}
nframes_t
sframes_t
SndFileSource::natural_position() const
{
return _timeline_position;
@ -605,7 +605,7 @@ SndFileSource::clear_capture_marks ()
}
void
SndFileSource::mark_capture_start (nframes_t pos)
SndFileSource::mark_capture_start (sframes_t pos)
{
if (destructive()) {
if (pos < _timeline_position) {
@ -748,7 +748,7 @@ SndFileSource::crossfade (Sample* data, nframes_t cnt, int fade_in)
return cnt;
}
nframes_t
sframes_t
SndFileSource::last_capture_start_frame () const
{
if (destructive()) {

View file

@ -140,7 +140,7 @@ Source::set_state (const XMLNode& node)
}
void
Source::update_length (nframes_t pos, nframes_t cnt)
Source::update_length (sframes_t pos, sframes_t cnt)
{
if (pos + cnt > _length) {
_length = pos + cnt;