FFmpeg: Pushing a Live Stream to a Server, with Filter Effects
For the full code, see the ffmpeg88livepush module of the NDKPractice project.
Testing playback from the streaming media server with FFmpeg
Download FFmpeg for Windows from the official site; macOS and Linux are similar.
Test steps, using Windows as an example:
- Go into the /bin directory of the extracted FFmpeg folder
- Pull the stream with the following command:
ffplay.exe rtmp://192.168.1.20/myapp/mystream
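If nothing is being published yet, ffplay will simply wait. You can publish a local test file to the same address with a command like the one below (assuming a test.mp4 in the current directory; any input FFmpeg can read will do):
ffmpeg.exe -re -i test.mp4 -c:v libx264 -c:a aac -f flv rtmp://192.168.1.20/myapp/mystream
The -re flag reads the input at its native frame rate, which makes the file behave like a live source.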
1. Recording audio to capture PCM data
Using AudioRecord:
/**
* Audio capture thread
*/
private static final class AudioRecordThread extends Thread {
private int mMinBufferSize;
private WeakReference<BaseVideoPush> mVideoRecorderWr;
private volatile boolean mShouldExit = false;
private MediaCodec mAudioCodec;
private MediaCodec.BufferInfo mBufferInfo;
private int mAudioTrackIndex = -1;
private long mAudioPts = 0;
private AudioRecord mAudioRecord; // the recorder that captures from the mic
private byte[] mAudioData; // buffer that receives the raw PCM data
public AudioRecordThread(WeakReference<BaseVideoPush> videoRecorderWr) {
this.mVideoRecorderWr = videoRecorderWr;
mAudioCodec = videoRecorderWr.get().mAudioCodec;
mBufferInfo = new MediaCodec.BufferInfo();
mMinBufferSize = AudioRecord.getMinBufferSize(
BaseVideoPush.AUDIO_SAMPLE_RATE,
AudioFormat.CHANNEL_IN_STEREO, // stereo
AudioFormat.ENCODING_PCM_16BIT);
mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
BaseVideoPush.AUDIO_SAMPLE_RATE,
AudioFormat.CHANNEL_IN_STEREO, // stereo
AudioFormat.ENCODING_PCM_16BIT,
mMinBufferSize);
mAudioData = new byte[mMinBufferSize];
}
@Override
public void run() {
try {
// start capturing from the microphone
mAudioRecord.startRecording();
while (true) {
if (mShouldExit) {
return;
}
// keep reading PCM data from the recorder
mAudioRecord.read(mAudioData, 0, mMinBufferSize);
// feed the PCM into mAudioCodec's input buffer
int inputBufferIndex = mAudioCodec.dequeueInputBuffer(0);
if (inputBufferIndex >= 0) {
ByteBuffer byteBuffer = mAudioCodec.getInputBuffers()[inputBufferIndex];
byteBuffer.clear();
byteBuffer.put(mAudioData);
// advance the pts by this buffer's duration: 16-bit PCM is
// sampleRate * channels * 2 bytes per second, so
// duration(us) = bufferBytes * 1000000 / (sampleRate * channels * 2)
mAudioPts += mMinBufferSize * 1000000L / (BaseVideoPush.AUDIO_SAMPLE_RATE * BaseVideoPush.AUDIO_CHANNELS * 2);
mAudioCodec.queueInputBuffer(inputBufferIndex, 0, mMinBufferSize, mAudioPts, 0);
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
onDestroy();
}
}
private void onDestroy() {
try {
mAudioCodec.stop();
mAudioCodec.release();
} catch (Exception e) {
e.printStackTrace();
}
}
private void requestExit() {
mShouldExit = true;
}
}
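The thread above assumes mAudioCodec was already configured as an AAC encoder inside BaseVideoPush, which the post doesn't show. A minimal sketch of what that setup might look like, reusing AUDIO_SAMPLE_RATE (44100) and AUDIO_CHANNELS (2) from the code above; the bit rate and max input size are my own assumptions:
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import java.io.IOException;

static MediaCodec createAacEncoder() throws IOException {
    MediaFormat format = MediaFormat.createAudioFormat(
            MediaFormat.MIMETYPE_AUDIO_AAC,
            BaseVideoPush.AUDIO_SAMPLE_RATE,   // 44100
            BaseVideoPush.AUDIO_CHANNELS);     // 2 (stereo)
    format.setInteger(MediaFormat.KEY_AAC_PROFILE,
            MediaCodecInfo.CodecProfileLevel.AACObjectLC); // AAC-LC
    format.setInteger(MediaFormat.KEY_BIT_RATE, 96_000);     // assumed bit rate
    format.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 8192); // >= the AudioRecord buffer size
    MediaCodec codec = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC);
    codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    return codec; // start() is called from the encoder thread
}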
2. Pushing the video frames to the streaming server

Why package the stream as FLV: RTMP carries FLV tags, so the payload format is not hard-wired to the bare H.264 bitstream.
The FLV packaging format (the AVC sequence header, i.e. the AVCDecoderConfigurationRecord):
- frame type: 1 = keyframe, 2 = non-keyframe (4 bits)
- CodecID: 7 = AVC (4 bits); combined with frame type this is exactly one byte, 0x17
- fixed: 0x00 0x00 0x00 0x00 (4 bytes: AVCPacketType = 0 for a sequence header, plus a 3-byte composition time)
- configurationVersion (1 byte): 0x01
- AVCProfileIndication (1 byte): sps[1], the profile
- profile_compatibility (1 byte): sps[2], the compatibility flags
- AVCLevelIndication (1 byte): sps[3], the profile level
- lengthSizeMinusOne (1 byte): 0xff, the number of bytes used for NALU length fields
- then the sps + pps data:
- sps number (1 byte): 0xe1, the SPS count
- sps data length (2 bytes): the SPS length
- sps data: the SPS content
- pps number (1 byte): 0x01, the PPS count
- pps data length (2 bytes): the PPS length
- pps data: the PPS content
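One detail worth flagging: MediaCodec delivers csd-0/csd-1 with the Annex-B start code 00 00 00 01 in front, while pushSpsPps below indexes sps[1], sps[2], sps[3] for the profile/compatibility/level bytes, i.e. it expects the bare NAL units. A small helper of my own (not from the original project) that strips the start code before the arrays cross into native code:
import java.util.Arrays;

// Strip a leading Annex-B start code (00 00 00 01 or 00 00 01) so the
// native side sees the bare NAL unit and sps[1..3] line up as expected.
static byte[] stripStartCode(byte[] nal) {
    if (nal.length > 4 && nal[0] == 0 && nal[1] == 0 && nal[2] == 0 && nal[3] == 1) {
        return Arrays.copyOfRange(nal, 4, nal.length);
    }
    if (nal.length > 3 && nal[0] == 0 && nal[1] == 0 && nal[2] == 1) {
        return Arrays.copyOfRange(nal, 3, nal.length);
    }
    return nal; // no start code present
}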
BaseVideoPush:VideoEncoderThread
private static final class VideoEncoderThread extends Thread {
private WeakReference<BaseVideoPush> mVideoRecorderWr;
private volatile boolean mShouldExit = false;
private MediaCodec mVideoCodec;
private MediaCodec.BufferInfo mBufferInfo;
private long mVideoPts = 0;
private byte[] mVideoSps, mVideoPps;
public VideoEncoderThread(WeakReference<BaseVideoPush> videoRecorderWr) {
this.mVideoRecorderWr = videoRecorderWr;
mVideoCodec = videoRecorderWr.get().mVideoCodec;
mBufferInfo = new MediaCodec.BufferInfo();
}
@Override
public void run() {
try {
mVideoCodec.start();
while (true) {
if (mShouldExit) {
return;
}
BaseVideoPush videoRecorder = mVideoRecorderWr.get();
if (videoRecorder == null) {
return;
}
// drain the encoder: H.264 comes out here; the input frames are drawn into the codec's Surface
int outputBufferIndex = mVideoCodec.dequeueOutputBuffer(mBufferInfo, 0);
if (outputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// the output format changed: grab the sps and pps
// Log.e("TAG", "got sps and pps");
ByteBuffer byteBuffer = mVideoCodec.getOutputFormat().getByteBuffer("csd-0");
mVideoSps = new byte[byteBuffer.remaining()];
byteBuffer.get(mVideoSps, 0, mVideoSps.length);
// Log.e("sps", bytesToHexString(mVideoSps));
byteBuffer = mVideoCodec.getOutputFormat().getByteBuffer("csd-1");
mVideoPps = new byte[byteBuffer.remaining()];
byteBuffer.get(mVideoPps, 0, mVideoPps.length);
// Log.e("pps", bytesToHexString(mVideoPps));
} else {
while (outputBufferIndex >= 0) {
// fetch the encoded data
ByteBuffer outBuffer = mVideoCodec.getOutputBuffers()[outputBufferIndex];
outBuffer.position(mBufferInfo.offset);
outBuffer.limit(mBufferInfo.offset + mBufferInfo.size);
// rebase the pts so the stream starts at 0
if (mVideoPts == 0) {
mVideoPts = mBufferInfo.presentationTimeUs;
}
mBufferInfo.presentationTimeUs -= mVideoPts;
// before a keyframe, first push the sps and pps to the streaming server
if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_KEY_FRAME) != 0) {
mVideoRecorderWr.get().mLivePush.pushSpsPps(mVideoSps, mVideoSps.length,
mVideoPps, mVideoPps.length);
}
byte[] data = new byte[outBuffer.remaining()];
outBuffer.get(data, 0, data.length);
// push the encoded frame to the streaming server
mVideoRecorderWr.get().mLivePush.pushVideo(data, data.length,
(mBufferInfo.flags & MediaCodec.BUFFER_FLAG_KEY_FRAME) != 0);
// report the current recording time via the callback
if (videoRecorder.mRecordListener != null) {
videoRecorder.mRecordListener.onTime(mBufferInfo.presentationTimeUs / 1000);
}
mVideoCodec.releaseOutputBuffer(outputBufferIndex, false);
outputBufferIndex = mVideoCodec.dequeueOutputBuffer(mBufferInfo, 0);
}
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
onDestroy();
}
}
private void onDestroy() {
try {
mVideoCodec.stop();
mVideoCodec.release();
} catch (Exception e) {
e.printStackTrace();
}
}
private void requestExit() {
mShouldExit = true;
}
}
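The video codec setup is also omitted from the post. Since the thread above only drains output (the frames are drawn into the encoder by the GL renderer), mVideoCodec is presumably a Surface-input H.264 encoder; a sketch of what that configuration might look like, with the bit rate, frame rate, and keyframe interval as placeholder assumptions:
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.view.Surface;
import java.io.IOException;

static MediaCodec createAvcEncoder(int width, int height) throws IOException {
    MediaFormat format = MediaFormat.createVideoFormat(
            MediaFormat.MIMETYPE_VIDEO_AVC, width, height);
    format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
            MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface); // input comes from a Surface
    format.setInteger(MediaFormat.KEY_BIT_RATE, width * height * 4); // assumed
    format.setInteger(MediaFormat.KEY_FRAME_RATE, 25);               // assumed
    format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);          // one keyframe per second
    MediaCodec codec = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_VIDEO_AVC);
    codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    Surface inputSurface = codec.createInputSurface(); // hand this to the EGL renderer
    return codec;
}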
void LivePush::pushSpsPps(jbyte *spsData, jint spsLen, jbyte *ppsData, jint ppsLen) {
// Build the FLV AVC sequence header exactly as laid out above.
// body size = sps size + pps size + 16 header bytes
int bodySize = spsLen + ppsLen + 16;
// build the RTMPPacket
RTMPPacket *pPacket = (RTMPPacket *) malloc(sizeof(RTMPPacket));
RTMPPacket_Alloc(pPacket, bodySize);
RTMPPacket_Reset(pPacket);
// fill in the body field by field, following the layout above
char *body = pPacket->m_body;
int index = 0;
// frame type (1 = keyframe, high 4 bits) + CodecID (7 = AVC, low 4 bits) = 0x17
body[index++] = 0x17;
// fixed: 0x00 0x00 0x00 0x00 (4 bytes: AVCPacketType = 0 for a sequence header + 3-byte composition time)
body[index++] = 0x00;
body[index++] = 0x00;
body[index++] = 0x00;
body[index++] = 0x00;
// configurationVersion (1 byte): 0x01
body[index++] = 0x01;
// AVCProfileIndication (1byte) sps[1] profile
body[index++] = spsData[1];
// profile_compatibility (1byte) sps[2] compatibility
body[index++] = spsData[2];
// AVCLevelIndication (1byte) sps[3] Profile level
body[index++] = spsData[3];
// lengthSizeMinusOne (1 byte): 0xff, the number of bytes used for NALU length fields
body[index++] = 0xff;
// the sps + pps data
// sps number (1 byte): 0xe1, the SPS count
body[index++] = 0xe1;
// sps data length (2 bytes)
body[index++] = (spsLen >> 8) & 0xFF;
body[index++] = spsLen & 0xFF;
// sps data
memcpy(&body[index], spsData, spsLen);
index += spsLen;
// pps number (1 byte): 0x01, the PPS count
body[index++] = 0x01;
// pps data length (2 bytes)
body[index++] = (ppsLen >> 8) & 0xFF;
body[index++] = ppsLen & 0xFF;
// pps data
memcpy(&body[index], ppsData, ppsLen);
// set the RTMPPacket metadata
pPacket->m_hasAbsTimestamp = 0;
pPacket->m_nTimeStamp = 0;
pPacket->m_headerType = RTMP_PACKET_SIZE_MEDIUM;
pPacket->m_packetType = RTMP_PACKET_TYPE_VIDEO;
pPacket->m_nBodySize = bodySize;
pPacket->m_nChannel = 0x04;
pPacket->m_nInfoField2 = this->pRtmp->m_stream_id;
pPacketQueue->push(pPacket);
}

void LivePush::pushVideoData(jbyte *videoData, jint dataLen, jboolean keyFrame) {
// frame type: 1 = keyframe, 2 = non-keyframe (4 bits)
// CodecID: 7 = AVC (4 bits); combined with frame type this is one byte, 0x17 or 0x27
// fixed: 0x01 0x00 0x00 0x00 (4 bytes); 0x01 = AVCPacketType NALU, then a 3-byte composition time
// video data length (4 bytes)
// video data
// body size = dataLen + 9 header bytes
int bodySize = dataLen + 9;
// build the RTMPPacket
RTMPPacket *pPacket = (RTMPPacket *) malloc(sizeof(RTMPPacket));
RTMPPacket_Alloc(pPacket, bodySize);
RTMPPacket_Reset(pPacket);
// fill in the body field by field, following the layout above
char *body = pPacket->m_body;
int index = 0;
// frame type + CodecID: 0x17 for a keyframe, 0x27 otherwise
if (keyFrame)
body[index++] = 0x17;
else
body[index++] = 0x27;
// fixed: 0x01 0x00 0x00 0x00 (4 bytes); 0x01 = AVCPacketType NALU data
body[index++] = 0x01;
body[index++] = 0x00;
body[index++] = 0x00;
body[index++] = 0x00;
// video data length (4 bytes)
body[index++] = (dataLen >> 24) & 0xFF;
body[index++] = (dataLen >> 16) & 0xFF;
body[index++] = (dataLen >> 8) & 0xFF;
body[index++] = dataLen & 0xFF;
// video data
memcpy(&body[index], videoData, dataLen);
// set the RTMPPacket metadata
pPacket->m_headerType = RTMP_PACKET_SIZE_LARGE;
pPacket->m_packetType = RTMP_PACKET_TYPE_VIDEO;
pPacket->m_hasAbsTimestamp = 0;
pPacket->m_nTimeStamp = RTMP_GetTime() - startTime;
pPacket->m_nBodySize = bodySize;
pPacket->m_nChannel = 0x04;
pPacket->m_nInfoField2 = this->pRtmp->m_stream_id;
pPacketQueue->push(pPacket);
}
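For reference, the Java half of the JNI bridge that the encoder threads call. The method names and parameters are inferred from the call sites above (pushSpsPps, pushVideo, pushAudio); the native library name is an assumption:
public class LivePush {
    static {
        System.loadLibrary("live-push"); // assumed library name
    }
    // signatures inferred from the calls in the encoder threads
    public native void pushSpsPps(byte[] sps, int spsLen, byte[] pps, int ppsLen);
    public native void pushVideo(byte[] data, int dataLen, boolean keyFrame);
    public native void pushAudio(byte[] data, int dataLen);
}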
3. Pushing the audio to the streaming server
/**
* Audio encoding thread
*/
private static final class AudioEncoderThread extends Thread {
private WeakReference<BaseVideoPush> mVideoRecorderWr;
private volatile boolean mShouldExit = false;
private MediaCodec mAudioCodec;
private MediaCodec.BufferInfo mBufferInfo;
private long mAudioPts = 0;
public AudioEncoderThread(WeakReference<BaseVideoPush> videoRecorderWr) {
this.mVideoRecorderWr = videoRecorderWr;
mAudioCodec = videoRecorderWr.get().mAudioCodec;
mBufferInfo = new MediaCodec.BufferInfo();
}
@Override
public void run() {
try {
// start the AudioCodec
mAudioCodec.start();
while (true) {
if (mShouldExit) {
return;
}
BaseVideoPush videoRecorder = mVideoRecorderWr.get();
if (videoRecorder == null) {
return;
}
// drain the encoder: the AAC comes out here; the PCM input was queued by the AudioRecordThread in step 1
int outputBufferIndex = mAudioCodec.dequeueOutputBuffer(mBufferInfo, 0);
while (outputBufferIndex >= 0) {
// fetch the encoded data
ByteBuffer outBuffer = mAudioCodec.getOutputBuffers()[outputBufferIndex];
outBuffer.position(mBufferInfo.offset);
outBuffer.limit(mBufferInfo.offset + mBufferInfo.size);
// rebase the pts so the stream starts at 0
if (mAudioPts == 0) {
mAudioPts = mBufferInfo.presentationTimeUs;
}
mBufferInfo.presentationTimeUs -= mAudioPts;
// copy the encoded AAC frame out of the output buffer
byte[] data = new byte[outBuffer.remaining()];
outBuffer.get(data, 0, data.length);
// push the audio data to the streaming server
mVideoRecorderWr.get().mLivePush.pushAudio(data, data.length);
mAudioCodec.releaseOutputBuffer(outputBufferIndex, false);
outputBufferIndex = mAudioCodec.dequeueOutputBuffer(mBufferInfo, 0);
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
onDestroy();
}
}
private void onDestroy() {
try {
mAudioCodec.stop();
mAudioCodec.release();
} catch (Exception e) {
e.printStackTrace();
}
}
private void requestExit() {
mShouldExit = true;
}
}
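How the three threads are wired together is left to BaseVideoPush; based on their constructors, the startup presumably looks something like this (a sketch, not the project's actual code):
// Inside BaseVideoPush, after the codecs are configured:
WeakReference<BaseVideoPush> wr = new WeakReference<>(this);
new AudioRecordThread(wr).start();   // captures PCM and feeds the AAC encoder
new AudioEncoderThread(wr).start();  // drains AAC and pushes it over RTMP
new VideoEncoderThread(wr).start();  // drains H.264 and pushes it over RTMP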
void LivePush::pushAudioData(jbyte *audioData, jint dataLen) {
// 2 header bytes:
// bits 1-4: sound format; AAC = decimal 10 = binary 1010 = hex A
// bits 5-6: sample rate; 0 = 5.5 kHz, 1 = 11 kHz, 2 = 22 kHz, 3 (binary 11) = 44 kHz
// bit 7: sample size; 0 = 8 bits, 1 = 16 bits
// bit 8: channel type; 0 = mono, 1 = stereo
// so the first byte works out to 0xAF = 1010 1111
// the second byte 0x01 marks raw AAC frame data
// (note: the FLV spec also expects an AAC sequence header -- 0xAF 0x00 followed by the
// AudioSpecificConfig -- to be sent once before the first raw frame; see the sketch below)
// body size = dataLen + 2 header bytes
int bodySize = dataLen + 2;
// build the RTMPPacket
RTMPPacket *pPacket = (RTMPPacket *) malloc(sizeof(RTMPPacket));
RTMPPacket_Alloc(pPacket, bodySize);
RTMPPacket_Reset(pPacket);
// fill in the body field by field, following the layout above
char *body = pPacket->m_body;
int index = 0;
body[index++] = 0xAF;
// 0x01 = raw AAC frame data
body[index++] = 0x01;
// audio data
memcpy(&body[index], audioData, dataLen);
// set the RTMPPacket metadata
pPacket->m_headerType = RTMP_PACKET_SIZE_LARGE;
pPacket->m_packetType = RTMP_PACKET_TYPE_AUDIO;
pPacket->m_hasAbsTimestamp = 0;
pPacket->m_nTimeStamp = RTMP_GetTime() - startTime;
pPacket->m_nBodySize = bodySize;
pPacket->m_nChannel = 0x04;
pPacket->m_nInfoField2 = this->pRtmp->m_stream_id;
pPacketQueue->push(pPacket);
}
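About the sequence-header note in pushAudioData: that packet's body is 0xAF 0x00 followed by the 2-byte AudioSpecificConfig, sent once before any 0xAF 0x01 packets. The post doesn't show this step; here is a sketch of computing the two config bytes for the format used above (AAC-LC, 44.1 kHz, stereo). pushAudioHeader is a hypothetical native method, not part of the project:
// AudioSpecificConfig layout: 5 bits audioObjectType | 4 bits samplingFrequencyIndex
// | 4 bits channelConfiguration | 3 bits of zero padding
static byte[] buildAudioSpecificConfig() {
    int audioObjectType = 2; // AAC LC
    int freqIndex = 4;       // 44100 Hz
    int channelConfig = 2;   // stereo
    int asc = (audioObjectType << 11) | (freqIndex << 7) | (channelConfig << 3);
    return new byte[]{(byte) (asc >> 8), (byte) (asc & 0xFF)}; // 0x12, 0x10
}
// e.g. mLivePush.pushAudioHeader(buildAudioSpecificConfig()); // hypothetical method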
4. Applying filter effects
// set up the texture rendering
/**
 * @description: pushing the stream with a grayscale filter
 * @author: jamin
 * @date: 2020/10/20 13:39
 */
public class GrayVideoPush extends BaseVideoPush {
private PushRenderer mPushRenderer;
public GrayVideoPush(Context context, EGLContext eglContext, int textureId) {
super(context, eglContext);
mPushRenderer = new PushRenderer(context, textureId);
setRenderer(mPushRenderer);
mPushRenderer.setFragmentRender(R.raw.filter_fragment_gray);
}
}
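The raw resource R.raw.filter_fragment_gray is not reproduced in the post. A grayscale filter typically converts each texel to luminance using the BT.601 weights; a sketch of what such a fragment shader might contain, written here as a Java string constant (the variable names and the sampler type are assumptions that depend on how textureId was created):
private static final String GRAY_FRAGMENT_SHADER =
        "precision mediump float;\n" +
        "varying vec2 v_TexCoord;\n" +     // assumed varying name
        "uniform sampler2D s_Texture;\n" + // assumed uniform name and sampler type
        "void main() {\n" +
        "    vec4 color = texture2D(s_Texture, v_TexCoord);\n" +
        "    float gray = dot(color.rgb, vec3(0.299, 0.587, 0.114));\n" + // BT.601 luma weights
        "    gl_FragColor = vec4(gray, gray, gray, color.a);\n" +
        "}\n";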
Copyright notice: this is an original article by q925092273, licensed under CC 4.0 BY-SA. Please include a link to the original source and this notice when reposting.