The key steps for implementing audio playback with AudioQueue can be summarized concisely as follows (a skeleton sketch follows the list).
1. Open the audio file with AudioFileOpenURL.
2. Read the file's data format (AudioStreamBasicDescription) with AudioFileGetProperty.
3. Create a playback queue with AudioQueueNewOutput, registering a callback function.
4. Copy the file's magic cookie (if any) to the queue with AudioQueueSetProperty.
5. Allocate the buffers with AudioQueueAllocateBuffer, pre-fill them, and start the queue with AudioQueueStart.
6. In the callback, read the next packets from the file and re-enqueue the buffer with AudioQueueEnqueueBuffer.
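The skeleton below is a minimal sketch of how those six steps map onto the Audio Queue Services C API (error handling omitted; names such as MyOutputCallback, url, userData, and bufferSize are placeholders, not from the original code):

AudioFileID file;
AudioStreamBasicDescription asbd;
AudioQueueRef queue;
AudioQueueBufferRef buffers[3];
UInt32 size = sizeof(asbd);
AudioFileOpenURL(url, kAudioFileReadPermission, 0, &file);                     // step 1: open the file
AudioFileGetProperty(file, kAudioFilePropertyDataFormat, &size, &asbd);        // step 2: read its format
AudioQueueNewOutput(&asbd, MyOutputCallback, userData, NULL, NULL, 0, &queue); // step 3: create the queue
for (int i = 0; i < 3; i++) {                                                  // step 5: allocate and prime buffers
    AudioQueueAllocateBuffer(queue, bufferSize, &buffers[i]);
    MyOutputCallback(userData, queue, buffers[i]);                             // pre-fill by invoking the callback once
}
AudioQueueStart(queue, NULL);                                                  // start playback
// step 6: the system now calls MyOutputCallback each time a buffer drains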
1. Playback

1. playAudio.h
Declares an Objective-C class that holds the audio file, its data format, the playback queue, and the buffers.

//
// playAudio.h
// ffmpegPlayAudio
//
// Created by infomedia xuanyuanchen on 12-3-26.
// Copyright (c) 2012 xuanyuanchen. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define NUM_BUFFERS 3
@interface playAudio : NSObject{
//The ID of the audio file to play
AudioFileID audioFile;
//Describes the audio stream's data format
AudioStreamBasicDescription dataFormat;
//The audio queue
AudioQueueRef queue;
SInt64 packetIndex;
UInt32 numPacketsToRead;
UInt32 bufferByteSize;
AudioStreamPacketDescription *packetDescs;
AudioQueueBufferRef buffers[NUM_BUFFERS];
}
//Expose the queue as a property
@property AudioQueueRef queue;
//The playback initializer
-(id)initWithAudio:(NSString *) path;
//Method that reads data from the file into a buffer and re-enqueues it
-(void) audioQueueOutputWithQueue:(AudioQueueRef)audioQueue
queueBuffer:(AudioQueueBufferRef)audioQueueBuffer;
-(UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer;
@end
//The callback function invoked by the queue (a C function, so it is declared outside the @interface)
static void BufferCallback(void *inUserData,AudioQueueRef inAQ,
AudioQueueBufferRef buffer);

2. playAudio.m
The implementation of playAudio.

//
// playAudio.m
// ffmpegPlayAudio
//
// Created by infomedia infomedia on 12-3-26.
// Copyright (c) 2012 infomedia. All rights reserved.
//
#import "playAudio.h"
//In testing, gBufferSizeBytes=0x10000 works fine for compressed formats (MP3/AAC, etc.), but with a WAV input file playback stops after only a few seconds. A phone cannot simply allocate much larger buffers to handle WAV, so suggestions for a better approach are welcome (one duration-based sizing idea is sketched after this listing).
static UInt32 gBufferSizeBytes=0x10000;//It must be a power of 2
@implementation playAudio
@synthesize queue;
//Implementation of the callback function
static void BufferCallback(void *inUserData,AudioQueueRef inAQ,
AudioQueueBufferRef buffer){
playAudio* player=(__bridge playAudio*)inUserData;
[player audioQueueOutputWithQueue:inAQ queueBuffer:buffer];
}
//Implementation of the buffer-refill method
-(void) audioQueueOutputWithQueue:(AudioQueueRef)audioQueue queueBuffer:(AudioQueueBufferRef)audioQueueBuffer{
OSStatus status;
//Read packet data from the file
UInt32 numBytes;
UInt32 numPackets=numPacketsToRead;
status = AudioFileReadPackets(audioFile, NO, &numBytes, packetDescs, packetIndex,&numPackets, audioQueueBuffer->mAudioData);
//If packets were read successfully
if (numPackets>0) {
//Set the buffer's size to match the audio data actually read
audioQueueBuffer->mAudioDataByteSize=numBytes;
//Hand the filled buffer back to the queue (packet descriptions only apply to VBR data)
status = AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffer, (packetDescs ? numPackets : 0), packetDescs);
//Advance the packet position
packetIndex += numPackets;
}
}
//Initialization and setup of audio playback
//Declare a playAudio object in your ViewController and initialize it like this:
//self.audio=[[playAudio alloc] initWithAudio:@"/Users/xuanyuanchen/audio/daolang.mp3"];
-(id) initWithAudio:(NSString *)path{
if (!(self=[super init])) return nil;
UInt32 size,maxPacketSize;
char *cookie;
int i;
OSStatus status;
//Open the audio file
status=AudioFileOpenURL((__bridge CFURLRef)[NSURL fileURLWithPath:path], kAudioFileReadPermission, 0, &audioFile);
if (status != noErr) {
//Error handling
NSLog(@"*** Error *** PlayAudio - play:Path: could not open audio file. Path given was: %@", path);
return nil;
}
//Get the audio data format
size = sizeof(dataFormat);
AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &size, &dataFormat);
//Create the audio queue for playback
AudioQueueNewOutput(&dataFormat, BufferCallback, (__bridge void *)self,
NULL, NULL, 0, &queue);
//Work out how many packets to read at a time
if (dataFormat.mBytesPerPacket==0 || dataFormat.mFramesPerPacket==0) {
//VBR data: size the reads from the largest possible packet
size=sizeof(maxPacketSize);
AudioFileGetProperty(audioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize);
if (maxPacketSize > gBufferSizeBytes) {
maxPacketSize= gBufferSizeBytes;
}
//Number of packets that fit in one buffer
numPacketsToRead = gBufferSizeBytes/maxPacketSize;
packetDescs=malloc(sizeof(AudioStreamPacketDescription)*numPacketsToRead);
}else {
//CBR data: packet descriptions are not needed
numPacketsToRead= gBufferSizeBytes/dataFormat.mBytesPerPacket;
packetDescs=nil;
}
//Set the magic cookie (see Chapter 27 for more details)
AudioFileGetPropertyInfo(audioFile, kAudioFilePropertyMagicCookieData, &size, nil);
if (size >0) {
cookie=malloc(sizeof(char)*size);
AudioFileGetProperty(audioFile, kAudioFilePropertyMagicCookieData, &size, cookie);
AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, cookie, size);
}
//Create the buffers and pre-fill each one
packetIndex=0;
for (i=0; i<NUM_BUFFERS; i++) {
AudioQueueAllocateBuffer(queue, gBufferSizeBytes, &buffers[i]);
//Stop priming early if the whole file has already been read
if ([self readPacketsIntoBuffer:buffers[i]]==1) {
break;
}
}
//Start the queue; from here on the system calls the callback automatically
AudioQueueStart(queue, NULL);
return self;
}
-(UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer {
UInt32 numBytes,numPackets;
//Read data from the file and store it in the buffer
numPackets = numPacketsToRead;
AudioFileReadPackets(audioFile, NO, &numBytes, packetDescs, packetIndex, &numPackets, buffer->mAudioData);
if(numPackets >0){
buffer->mAudioDataByteSize=numBytes;
AudioQueueEnqueueBuffer(queue, buffer, (packetDescs ? numPackets : 0), packetDescs);
packetIndex += numPackets;
}
else{
return 1;//means no packets were read (end of file)
}
return 0;//0 means a normal exit
}
@end
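Regarding the WAV problem noted above gBufferSizeBytes: one possible direction (a sketch only, not part of the original post; BufferSizeForSeconds is a hypothetical helper) is to size each buffer by playback duration instead of a fixed byte count, the same idea Apple's DeriveBufferSize listing uses for recording:

//Sketch: derive a buffer size holding roughly `seconds` of audio.
//Assumes asbd was read from the file as in initWithAudio:, and maxPacketSize
//from kAudioFilePropertyPacketSizeUpperBound.
static UInt32 BufferSizeForSeconds(AudioStreamBasicDescription asbd,
                                   UInt32 maxPacketSize, Float64 seconds) {
    static const UInt32 kMaxBufferSize = 0x50000;
    static const UInt32 kMinBufferSize = 0x4000;
    UInt32 size;
    if (asbd.mFramesPerPacket != 0) {
        Float64 packetsForTime = asbd.mSampleRate / asbd.mFramesPerPacket * seconds;
        size = (UInt32)(packetsForTime * maxPacketSize);
    } else {
        //No frames-per-packet figure: fall back to one maximal packet
        size = (kMaxBufferSize > maxPacketSize) ? kMaxBufferSize : maxPacketSize;
    }
    if (size > kMaxBufferSize) size = kMaxBufferSize;
    if (size < kMinBufferSize) size = kMinBufferSize;
    return size;
}

Sizing for a fixed duration (say 0.5 s) keeps buffers small for compressed formats while letting uncompressed WAV get the larger buffers it needs, which may avoid the premature stop without a huge fixed allocation.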
2. Recording
(1.)record.h
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define kNumberBuffers 3
@interface Record : NSObject <AVAudioPlayerDelegate>
{
AudioStreamBasicDescription mDataFormat;
AudioQueueRef mQueue;
AudioQueueBufferRef mBuffers[kNumberBuffers];
UInt32 bufferByteSize;
AVAudioPlayer *player;
}
@property(nonatomic,assign) bool mIsRunning;
@property(nonatomic,retain) NSMutableData *recordData;
-(void)record;
-(void)stop;
-(void)play;
-(void)pause;
-(void)dealWithData:(NSData*)data;
@end
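A minimal usage sketch (illustrative only; the surrounding view controller and trigger points are assumptions, not from the original post):

Record *recorder = [[Record alloc] init];
[recorder record]; //start capturing; the input callback appends processed PCM to recordData
// ...later, e.g. when the user taps Stop...
[recorder stop];   //stop the queue and wrap recordData in a WAV header
[recorder play];   //play the captured audio back through AVAudioPlayer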
(2.)record.m
#import "record.h"
//SoundTouch is a C++ library, so this file must be compiled as Objective-C++ (record.mm); the header name assumes the standard SoundTouch distribution
#include "SoundTouch.h"
@implementation Record
@synthesize mIsRunning = _mIsRunning;
@synthesize recordData = _recordData;
-(id)init
{
self = [super init];
if (self) {
AudioSessionInitialize(NULL, NULL, NULL, (__bridge void *)self);
// self.recordData =[NSMutableData data];
self.mIsRunning=false;
}
return self;
}
static void HandleInputBuffer (
void *inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp *inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription *inPacketDesc
) {
Record *recorderPro = (__bridge Record*)inUserData;
if (inNumPackets > 0 && recorderPro.mIsRunning){
int pcmSize = inBuffer->mAudioDataByteSize;
char *pcmData = (char *)inBuffer->mAudioData;
NSData *data = [[NSData alloc] initWithBytes:pcmData length:pcmSize];
[recorderPro dealWithData:data];
//Hand the buffer back to the queue so recording can continue
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
}
void DeriveBufferSize (
AudioQueueRef audioQueue,
AudioStreamBasicDescription ASBDescription,
Float64 seconds,
UInt32 *outBufferSize
) {
static const int maxBufferSize = 0x50000;
int maxPacketSize = ASBDescription.mBytesPerPacket;
if (maxPacketSize == 0) {
UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
AudioQueueGetProperty (
audioQueue,
kAudioQueueProperty_MaximumOutputPacketSize,
&maxPacketSize,
&maxVBRPacketSize
);
}
Float64 numBytesForTime =
ASBDescription.mSampleRate * maxPacketSize * seconds;
*outBufferSize =
UInt32 (numBytesForTime < maxBufferSize ?
numBytesForTime : maxBufferSize);
}
-(void)dealWithData:(NSData*)data
{
[self changeVoice:data];
}
-(void)changeVoice:(NSData*)audioData
{
//Configure SoundTouch for 16 kHz mono PCM with a pitch-shifted "voice change" effect
soundtouch::SoundTouch mSoundTouch;
mSoundTouch.setSampleRate(16000);
mSoundTouch.setChannels(1);
mSoundTouch.setTempoChange(0.05);   //tempo change, in percent
mSoundTouch.setPitchSemiTones(12);  //shift pitch up one octave
mSoundTouch.setRateChange(-0.7);    //playback rate change, in percent
mSoundTouch.setSetting(SETTING_SEQUENCE_MS, 40);
mSoundTouch.setSetting(SETTING_SEEKWINDOW_MS, 16);
mSoundTouch.setSetting(SETTING_OVERLAP_MS, 8);
if (audioData != nil) {
char *pcmData = (char *)audioData.bytes;
int pcmSize = audioData.length;
int nSamples = pcmSize / 2; //16-bit samples: two bytes each
mSoundTouch.putSamples((short *)pcmData, nSamples);
short *samples = new short[pcmSize];
int numSamples = 0;
do {
memset(samples, 0, pcmSize);
numSamples = mSoundTouch.receiveSamples(samples, pcmSize);
[self.recordData appendBytes:samples length:numSamples*2];
} while (numSamples > 0);
delete [] samples;
}
NSLog(@"-------recording%d",self.recordData.length);
}
- (void) setupAudioFormat:(UInt32) inFormatID SampleRate:(int)sampleRate
{
memset(&mDataFormat, 0, sizeof(mDataFormat));
mDataFormat.mSampleRate = sampleRate;
//UInt32 size = sizeof(mDataFormat.mChannelsPerFrame);
//AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels, &size, &mDataFormat.mChannelsPerFrame);
mDataFormat.mChannelsPerFrame=1;
mDataFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM){
// if we want pcm, default to signed 16-bit little-endian
mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mDataFormat.mBitsPerChannel = 16;
mDataFormat.mBytesPerPacket = mDataFormat.mBytesPerFrame =(mDataFormat.mBitsPerChannel / 8) *mDataFormat.mChannelsPerFrame;
mDataFormat.mFramesPerPacket = 1;
}
}
-(void)record
{
self.recordData = [NSMutableData data];
AudioSessionSetActive(true);
// category
UInt32 category = kAudioSessionCategory_PlayAndRecord;
AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
// format
[self setupAudioFormat:kAudioFormatLinearPCM SampleRate:16000];
//Create the input queue and register the callback; the queue must exist before DeriveBufferSize can query it
AudioQueueNewInput(&mDataFormat, HandleInputBuffer, (__bridge void *)self, NULL, NULL, 0, &mQueue);
DeriveBufferSize(mQueue, mDataFormat, 0.5, &bufferByteSize);
for (int i = 0; i < kNumberBuffers; ++i) {
AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]);
AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL);
}
// Start recording
AudioQueueStart(mQueue, NULL);
self.mIsRunning = YES;
}
-(void)stop
{
AudioQueueFlush(mQueue);
AudioQueueStop (mQueue,true);
NSMutableData *wavDatas = [[NSMutableData alloc] init];
int fileLength = self.recordData.length;
//createWaveHeader is assumed to produce the standard 44-byte RIFF/WAV header (a sketch is given after this listing)
void *header = createWaveHeader(fileLength, 1, 16000, 16);
[wavDatas appendBytes:header length:44];
[wavDatas appendData:self.recordData];
self.recordData = wavDatas;
NSLog(@"-------stop %lu",(unsigned long)self.recordData.length);
self.mIsRunning = false;
}
-(void)play
{
NSError *playerError;
player = [[AVAudioPlayer alloc] initWithData:self.recordData error:&playerError];
// Alternatively, play from a file on disk:
// player = [[AVAudioPlayer alloc] initWithContentsOfURL:recordedFile error:&playerError];
if (player == nil)
{
NSLog(@"Error creating player: %@", [playerError description]);
}
player.delegate = self;
if(![player isPlaying]) {
[player play];
}
}
-(void)pause
{
if ([player isPlaying]) {
[player pause];
}
}
- (void)audioPlayerDidFinishPlaying:(AVAudioPlayer *)thePlayer successfully:(BOOL)flag
{
NSLog(@"%@",@"audioPlayerDidFinishPlaying");
}
@end
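The stop method above calls createWaveHeader, which the post never shows. A possible implementation (a sketch under the assumption of 16-bit mono PCM at 16 kHz, matching the call site; the original version may differ) builds the standard 44-byte RIFF header like this:

//Sketch of the missing helper: returns a static 44-byte RIFF/WAV header.
//totalDataLen is the PCM payload size in bytes. Fields are written with
//memcpy, which yields little-endian layout on iOS (ARM) as WAV requires.
static char waveHeader[44];
void *createWaveHeader(int totalDataLen, short channels, int sampleRate, short bitsPerSample) {
    int byteRate = sampleRate * channels * bitsPerSample / 8;
    int chunkSize = totalDataLen + 36;        //file size minus the first 8 bytes
    short blockAlign = channels * bitsPerSample / 8;
    int subchunk1Size = 16;                   //fmt chunk size for plain PCM
    short audioFormat = 1;                    //1 = linear PCM
    memcpy(waveHeader, "RIFF", 4);
    memcpy(waveHeader + 4, &chunkSize, 4);
    memcpy(waveHeader + 8, "WAVEfmt ", 8);
    memcpy(waveHeader + 16, &subchunk1Size, 4);
    memcpy(waveHeader + 20, &audioFormat, 2);
    memcpy(waveHeader + 22, &channels, 2);
    memcpy(waveHeader + 24, &sampleRate, 4);
    memcpy(waveHeader + 28, &byteRate, 4);
    memcpy(waveHeader + 32, &blockAlign, 2);
    memcpy(waveHeader + 34, &bitsPerSample, 2);
    memcpy(waveHeader + 36, "data", 4);
    memcpy(waveHeader + 40, &totalDataLen, 4);
    return waveHeader;
}

If you use a sketch like this, declare the function above stop (or in record.h) so the call site sees its prototype.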