Mirror of https://github.com/Ardour/ardour.git
partially revert 30b087ab3, CoreAudio SDK uses "Frames"

parent: c8a9b28d3b
commit: 1ec471c961

2 changed files with 8 additions and 8 deletions
@@ -752,7 +752,7 @@ static void PrintStreamDesc (AudioStreamBasicDescription *inDesc)
 	printf (" Format ID:%.*s\n", (int)sizeof(inDesc->mFormatID), (char*)&inDesc->mFormatID);
 	printf (" Format Flags:%X\n", (unsigned int)inDesc->mFormatFlags);
 	printf (" Bytes per Packet:%d\n", (int)inDesc->mBytesPerPacket);
-	printf (" Samples per Packet:%d\n", (int)inDesc->mSamplesPerPacket);
+	printf (" Frames per Packet:%d\n", (int)inDesc->mFramesPerPacket);
 	printf (" Bytes per Frame:%d\n", (int)inDesc->mBytesPerFrame);
 	printf (" Channels per Frame:%d\n", (int)inDesc->mChannelsPerFrame);
 	printf (" Bits per Channel:%d\n", (int)inDesc->mBitsPerChannel);
@@ -895,7 +895,7 @@ CoreAudioPCM::pcm_start (
 	srcFormat.mFormatID = kAudioFormatLinearPCM;
 	srcFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
 	srcFormat.mBytesPerPacket = sizeof(float);
-	srcFormat.mSamplesPerPacket = 1;
+	srcFormat.mFramesPerPacket = 1;
 	srcFormat.mBytesPerFrame = sizeof(float);
 	srcFormat.mChannelsPerFrame = chn_in;
 	srcFormat.mBitsPerChannel = 32;
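Editorial note: the srcFormat block above fills a CoreAudio AudioStreamBasicDescription for packed, native-endian, non-interleaved 32-bit float; mFramesPerPacket is the field name the SDK actually defines, which is why this commit reverts the "Samples" rename here. A minimal standalone sketch follows; the sample-rate and channel-count arguments are placeholders, not values taken from the backend.

#include <CoreAudio/CoreAudioTypes.h>
#include <cstring>

// Build an ASBD for packed, native-endian, non-interleaved 32-bit float.
// "channels" and "rate" are illustrative parameters; the backend uses its own
// configured values (chn_in / chn_out and the device sample rate).
static AudioStreamBasicDescription
make_float_format (UInt32 channels, Float64 rate)
{
	AudioStreamBasicDescription fmt;
	std::memset (&fmt, 0, sizeof (fmt));
	fmt.mSampleRate       = rate;
	fmt.mFormatID         = kAudioFormatLinearPCM;
	fmt.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
	fmt.mBytesPerPacket   = sizeof (float); // non-interleaved: each buffer carries one channel
	fmt.mFramesPerPacket  = 1;              // the SDK field is "Frames", hence this revert
	fmt.mBytesPerFrame    = sizeof (float);
	fmt.mChannelsPerFrame = channels;
	fmt.mBitsPerChannel   = 32;
	return fmt;
}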
@@ -903,8 +903,8 @@ CoreAudioPCM::pcm_start (
 	err = AudioUnitSetProperty(_auhal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, AUHAL_INPUT_ELEMENT, &srcFormat, sizeof(AudioStreamBasicDescription));
 	if (err != noErr) { errorMsg="kAudioUnitProperty_StreamFormat, Output"; _state = -6; goto error; }
 
-	err = AudioUnitSetProperty(_auhal, kAudioUnitProperty_MaximumSamplesPerSlice, kAudioUnitScope_Global, AUHAL_INPUT_ELEMENT, (UInt32*)&_samples_per_period, sizeof(UInt32));
-	if (err != noErr) { errorMsg="kAudioUnitProperty_MaximumSamplesPerSlice, Input"; _state = -6; goto error; }
+	err = AudioUnitSetProperty(_auhal, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, AUHAL_INPUT_ELEMENT, (UInt32*)&_samples_per_period, sizeof(UInt32));
+	if (err != noErr) { errorMsg="kAudioUnitProperty_MaximumFramesPerSlice, Input"; _state = -6; goto error; }
 	}
 
 	if (chn_out > 0) {
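Editorial note: the second change in this hunk restores kAudioUnitProperty_MaximumFramesPerSlice, which is the property name the SDK actually exports (there is no "MaximumSamplesPerSlice" constant). A minimal sketch of setting it on an AudioUnit is below; the unit handle, element and frame count are hypothetical stand-ins for the backend's _auhal, AUHAL_INPUT_ELEMENT and _samples_per_period.

#include <AudioUnit/AudioUnit.h>

// Tell an AudioUnit the largest number of frames it may be asked to render
// in one slice (typically the configured hardware buffer size).
static OSStatus
set_max_frames_per_slice (AudioUnit au, AudioUnitElement element, UInt32 frames)
{
	return AudioUnitSetProperty (au,
	                             kAudioUnitProperty_MaximumFramesPerSlice,
	                             kAudioUnitScope_Global,
	                             element,
	                             &frames,
	                             sizeof (frames));
}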
@@ -912,7 +912,7 @@ CoreAudioPCM::pcm_start (
 	dstFormat.mFormatID = kAudioFormatLinearPCM;
 	dstFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
 	dstFormat.mBytesPerPacket = sizeof(float);
-	dstFormat.mSamplesPerPacket = 1;
+	dstFormat.mFramesPerPacket = 1;
 	dstFormat.mBytesPerFrame = sizeof(float);
 	dstFormat.mChannelsPerFrame = chn_out;
 	dstFormat.mBitsPerChannel = 32;
@@ -920,8 +920,8 @@ CoreAudioPCM::pcm_start (
 	err = AudioUnitSetProperty(_auhal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, AUHAL_OUTPUT_ELEMENT, &dstFormat, sizeof(AudioStreamBasicDescription));
 	if (err != noErr) { errorMsg="kAudioUnitProperty_StreamFormat Input"; _state = -5; goto error; }
 
-	err = AudioUnitSetProperty(_auhal, kAudioUnitProperty_MaximumSamplesPerSlice, kAudioUnitScope_Global, AUHAL_OUTPUT_ELEMENT, (UInt32*)&_samples_per_period, sizeof(UInt32));
-	if (err != noErr) { errorMsg="kAudioUnitProperty_MaximumSamplesPerSlice, Output"; _state = -5; goto error; }
+	err = AudioUnitSetProperty(_auhal, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, AUHAL_OUTPUT_ELEMENT, (UInt32*)&_samples_per_period, sizeof(UInt32));
+	if (err != noErr) { errorMsg="kAudioUnitProperty_MaximumFramesPerSlice, Output"; _state = -5; goto error; }
 	}
 
 	/* read back stream descriptions */
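Editorial note: the "/* read back stream descriptions */" context line refers to querying the formats the AUHAL actually accepted after they were set. A hedged sketch of such a read-back via AudioUnitGetProperty; the unit handle, scope and element are placeholders rather than the backend's own names.

#include <AudioUnit/AudioUnit.h>

// Query the stream format an AudioUnit ended up with on a given scope/element,
// e.g. after the AudioUnitSetProperty() calls shown in the hunks above.
static OSStatus
read_back_format (AudioUnit au, AudioUnitScope scope, AudioUnitElement element,
                  AudioStreamBasicDescription* fmt)
{
	UInt32 size = sizeof (*fmt);
	return AudioUnitGetProperty (au,
	                             kAudioUnitProperty_StreamFormat,
	                             scope,
	                             element,
	                             fmt,
	                             &size);
}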
@@ -126,7 +126,7 @@ public:
 			AudioUnitRenderActionFlags* ioActionFlags,
 			const AudioTimeStamp* inTimeStamp,
 			UInt32 inBusNumber,
-			UInt32 inNumberSamples,
+			UInt32 inNumberFrames,
 			AudioBufferList* ioData);
 
 	void xrun_callback ();
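Editorial note: the header change renames the next-to-last callback parameter to inNumberFrames, matching the AURenderCallback prototype in the SDK. For reference, a minimal sketch of a callback with that prototype and how such a callback is typically attached via kAudioUnitProperty_SetRenderCallback; the unit handle, bus 0 and the silence-filling body are illustrative assumptions, not the backend's implementation.

#include <AudioUnit/AudioUnit.h>
#include <cstring>

// Render callback matching the AURenderCallback prototype: CoreAudio passes
// the block size as "inNumberFrames".
static OSStatus
render_cb (void* inRefCon,
           AudioUnitRenderActionFlags* ioActionFlags,
           const AudioTimeStamp* inTimeStamp,
           UInt32 inBusNumber,
           UInt32 inNumberFrames,
           AudioBufferList* ioData)
{
	// Fill every output buffer with silence (placeholder processing).
	for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i) {
		std::memset (ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
	}
	return noErr;
}

// Attach the callback to a unit's output bus 0 ("au" is a hypothetical handle).
static OSStatus
attach_render_cb (AudioUnit au)
{
	AURenderCallbackStruct cb = { render_cb, 0 };
	return AudioUnitSetProperty (au,
	                             kAudioUnitProperty_SetRenderCallback,
	                             kAudioUnitScope_Input,
	                             0,
	                             &cb,
	                             sizeof (cb));
}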