Android RTMP Live Streaming: Audio and Video Encoding with FAAC & x264

Android RTMP Live Streaming: POSIX Multithreading in C/C++ covered POSIX threads, so readers should already have a working understanding of them. This post builds on those threads to perform audio and video encoding. H.264 encoding was described at the start of this series; here we use x264 to encode video under the H.264 standard and FAAC to encode audio under the AAC standard.

Live Streaming Project Structure

Here, once again, is the structure diagram of this live streaming project.

Video Encoding with x264

Android RTMP Live Streaming: Audio/Video Capture implemented the Java-side video capture. To recap that flow: the Camera API delivers preview frames, and each preview frame is passed through NativePush to the native layer to be encoded and pushed.

@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    if (mCamera != null) {
        mCamera.addCallbackBuffer(callbackBuffer);
    }

    if (isPushing) {
        //hand the preview frame to the native layer via NativePush.sendVideo for encoding and pushing
        builder.getNativePush().sendVideo(data);
    }
}

NativePush.sendVideo is a native method:

public native void sendVideo(byte[] data);

Its implementation on the native side looks like this:

#include <jni.h>
#include <android/log.h>
#include <stdlib.h>
#include <string.h>   //memcpy / memset
#include "include/x264/x264.h"
#include "include/rtmp/rtmp.h"
#include "include/queue.h"
#include <pthread.h>
#include "include/faac/faac.h"

/**
 * @param env
 * @param instance
 * @param data_
 */
JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_sendVideo(JNIEnv *env, jobject instance, jbyteArray data_) {
    jbyte *data = (*env)->GetByteArrayElements(env, data_, NULL);
    //convert the NV21 frame to YUV420p (I420):
    //Y is copied unchanged; in the interleaved VU plane the second byte of each pair is U, the first is V
    jbyte *y = pic.img.plane[0];
    jbyte *u = pic.img.plane[1];
    jbyte *v = pic.img.plane[2];
    //copy Y
    memcpy(y, data, y_len);
    //de-interleave U and V
    for (int i = 0; i < u_len; ++i) {
        *(u + i) = *(data + y_len + i * 2 + 1);
        *(v + i) = *(data + y_len + i * 2);
    }

    //encode with x264
    x264_nal_t *nal = NULL;
    int n_nal = -1;
    if (x264_encoder_encode(x264_encoder, &nal, &n_nal, &pic, &pic_out) < 0) {
        LOGE("%s", "x264 encode error");
        //release the array before the early return so it does not leak
        (*env)->ReleaseByteArrayElements(env, data_, data, 0);
        return;
    }

    //SPS / PPS buffers
    unsigned char sps[SPS_OUT_BUFFER_SIZE];
    unsigned char pps[PPS_OUT_BUFFER_SIZE];
    int sps_length, pps_length;
    //reset
    memset(sps, 0, SPS_OUT_BUFFER_SIZE);
    memset(pps, 0, PPS_OUT_BUFFER_SIZE);

    pic.i_pts += 1; //monotonically increasing presentation timestamp
    for (int i = 0; i < n_nal; ++i) {
        if (nal[i].i_type == NAL_SPS) {
            //layout: 00 00 00 01 (start code); NAL type 7 (SPS); payload
            //do not copy the 4-byte start code: sps_length = payload length - 4
            sps_length = nal[i].i_payload - 4;
            //copy the SPS payload
            memcpy(sps, nal[i].p_payload + 4, sps_length);
        } else if (nal[i].i_type == NAL_PPS) {
            pps_length = nal[i].i_payload - 4;
            memcpy(pps, nal[i].p_payload + 4, pps_length);

            //send the video sequence header
            add_squence_header_to_rtmppacket(sps, pps, sps_length, pps_length);
        } else {
            //send the frame body
            add_frame_body_to_rtmppacket(nal[i].p_payload, nal[i].i_payload);
        }
    }

    (*env)->ReleaseByteArrayElements(env, data_, data, 0);
}

The code is short and does three things:

  1. Convert the NV21 frame to YUV420 (I420)
  2. Encode it with x264
  3. Wrap the encoded data in RTMP packets and add them to the queue

Before diving into the encoding itself, here is a supplement to the x264 initialization introduced in Android RTMP Live Streaming: Audio/Video Capture; the earlier configuration code is reproduced below.

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_setNativeVideoOptions(JNIEnv *env, jobject instance,
                                                           jint width, jint height, jint bitrate,
                                                           jint fps) {
    LOGI("%s", "setNativeVideoOptions...");

    //x264 ships several presets/tunings; choose the ultrafast preset with the zero-latency tune
    x264_param_default_preset(&param, "ultrafast", "zerolatency");
    //YUV420
    param.i_csp = X264_CSP_I420;
    //width/height must match the Camera preview size on the Java side,
    //otherwise the picture will come out split/garbled
    param.i_width = width;
    param.i_height = height;

    //YUV plane sizes
    y_len = width * height;
    u_len = y_len / 4;
    v_len = u_len;

    //rate control: CQP (constant QP), CRF (constant rate factor, i.e. constant quality), ABR (average bitrate)
    param.rc.i_rc_method = X264_RC_CRF;
    //bitrate in kbps
    param.rc.i_bitrate = bitrate / 1000;
    //maximum instantaneous bitrate
    param.rc.i_vbv_max_bitrate = bitrate / 1000 * 1.2;
    //constant-frame-rate input: drive rate control from fps instead of per-frame timestamps
    param.b_vfr_input = 0;
    //frame rate numerator
    param.i_fps_num = fps;
    //frame rate denominator
    param.i_fps_den = 1;
    param.i_timebase_den = param.i_fps_num;
    param.i_timebase_num = param.i_fps_den;
    //repeat SPS/PPS before every keyframe to improve error resilience
    param.b_repeat_headers = 1;
    //level 5.1
    param.i_level_idc = 51;

    //profile
    x264_param_apply_profile(&param, "baseline");

    //allocate the picture buffer used as encoder input
    x264_picture_alloc(&pic, param.i_csp, param.i_width, param.i_height);
    x264_encoder = x264_encoder_open(&param);
    if (x264_encoder) {
        LOGI("initVideoOptions:%s", "success");
    } else {
        LOGE("initVideoOptions:%s", "failed");
    }
}

The H.264 standard was explained in detail at the very beginning of this series. Note the following settings in the code above; they have a significant impact on the live stream.

param.b_repeat_headers = 1;

Android RTMP Live Streaming: The H.264 Standard (Part 1) briefly described SPS and PPS. Here x264 is configured to repeat the SPS and PPS before every keyframe, which exploits their properties (see the earlier article for the details) to improve error resilience.

param.i_level_idc = 51;
x264_param_apply_profile(&param, "baseline");

The level and profile settings directly affect stream quality and decoder compatibility; see Android RTMP Live Streaming: The H.264 Standard (Part 2) for what they mean.
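
For reference (this snippet is my own addition, not part of the project code), x264's public header exposes the preset, tune and profile strings it accepts as NULL-terminated arrays, so you can dump them to see what a given build supports before changing these settings:

#include <stdio.h>
#include "include/x264/x264.h" //same include path used elsewhere in this project

//Print the preset/tune/profile names accepted by x264_param_default_preset()
//and x264_param_apply_profile() in this x264 build.
static void dump_x264_options(void) {
    for (int i = 0; x264_preset_names[i]; ++i)
        printf("preset:  %s\n", x264_preset_names[i]);
    for (int i = 0; x264_tune_names[i]; ++i)
        printf("tune:    %s\n", x264_tune_names[i]);
    for (int i = 0; x264_profile_names[i]; ++i)
        printf("profile: %s\n", x264_profile_names[i]);
}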

Converting NV21 to YUV420

About YUV

YUV is a color encoding method commonly used in image and video processing pipelines. When encoding photos or video, YUV takes human perception into account and allows the chroma bandwidth to be reduced (think of the black-and-white TVs of your childhood, which only needed the luma signal). Y carries luma (brightness), while U and V carry chrominance (color).

YUV formats fall into two families:

  • Packed formats: the Y, U and V values are stored interleaved as an array of macro-pixels, similar to how RGB is laid out.
  • Planar formats: the Y, U and V components are each stored in a separate plane.

Common YUV formats
To save bandwidth, most YUV formats use fewer than 24 bits per pixel on average. The main subsampling schemes are YCbCr 4:2:0, YCbCr 4:2:2, YCbCr 4:1:1 and YCbCr 4:4:4, written using the A:B:C notation:

  • 4:4:4 means full sampling.
  • 4:2:2 means 2:1 horizontal subsampling and full vertical sampling.
  • 4:2:0 means 2:1 horizontal subsampling and 2:1 vertical subsampling.
  • 4:1:1 means 4:1 horizontal subsampling and full vertical sampling.

The most common Y:UV ratios are 1:1 or 2:1. DVD-Video is recorded as YUV 4:2:0, commonly known as I420. The "0" in 4:2:0 does not mean that U (Cb) or V (Cr) is always zero; it means the two chroma components alternate between rows: each row carries only one of them, so a 4:2:0 row is followed by a 4:0:2 row, then 4:2:0 again, and so on. Other common YUV formats include YUY2, YUYV, YVYU, UYVY, AYUV, Y41P, Y411, Y211, IF09, IYUV, YV12, YVU9, YUV411, YUV420, etc.
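
To make 4:2:0 concrete in terms of memory: every 2x2 block of pixels shares one U and one V sample, so each chroma plane is a quarter of the luma plane and a full I420 frame occupies width * height * 3 / 2 bytes. A tiny sketch of that arithmetic (an illustration of mine, not project code):

#include <stddef.h>

//Byte sizes of the three planes of an I420 (YUV420p) frame.
static size_t i420_frame_size(int width, int height) {
    size_t y_len = (size_t) width * height; //full-resolution luma plane
    size_t u_len = y_len / 4;               //quarter-resolution chroma plane
    size_t v_len = u_len;
    return y_len + u_len + v_len;           //== width * height * 3 / 2
}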

NV21
Both NV21 and YUV420p are YUV420 formats: every four Y samples share one pair of U/V samples. The difference is how the chroma samples are laid out in memory.

NV21 layout (semi-planar): YYYYYYYY VUVU... (all Y, then interleaved V/U pairs)
YUV420p (I420) layout: YYYYYYYY UU... VV... (all Y, then the whole U plane, then the whole V plane)
Converting NV21 to YUV420p: Y is copied unchanged; for each interleaved pair after the Y plane, the second byte goes to the U plane and the first byte goes to the V plane, i.e. u[i] = data[y_len + 2*i + 1] and v[i] = data[y_len + 2*i].

NV21 -> YUV420

//convert the NV21 frame to YUV420p (I420)
//Y is copied unchanged; the interleaved VU plane is split into separate U and V planes
jbyte *y = pic.img.plane[0];//Y plane of the x264 picture
jbyte *u = pic.img.plane[1];//U plane of the x264 picture
jbyte *v = pic.img.plane[2];//V plane of the x264 picture
//the Y plane is identical in NV21 and YUV420p, so copy it straight into the target buffer
memcpy(y, data, y_len);
//de-interleave U and V
for (int i = 0; i < u_len; ++i) {
    *(u + i) = *(data + y_len + i * 2 + 1);
    *(v + i) = *(data + y_len + i * 2);
}

Encoding with x264

Once x264 has been initialized, it can be used to encode the video frames.

//...
x264_nal_t *nal = NULL;
int n_nal = -1; //number of NAL units produced
if (x264_encoder_encode(x264_encoder, &nal, &n_nal, &pic, &pic_out) < 0) {
    LOGE("%s", "x264 encode error");
    return;
}
//...

x264_encoder_encode encodes the YUV420 frame we just prepared.
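
One detail worth knowing: x264 can buffer frames internally (lookahead / B-frame reordering), so x264_encoder_encode may return zero NAL units for a given input and emit them later. With the "zerolatency" tune used here there is normally nothing buffered, but when a stream ends the encoder is usually drained before x264_encoder_close. A hedged sketch of that flushing step (it is not part of the code shown in this article):

//Drain frames still buffered inside the encoder by passing a NULL input
//picture until x264 reports no more delayed frames.
static void flush_x264(x264_t *encoder, x264_picture_t *pic_out) {
    x264_nal_t *nal = NULL;
    int n_nal = 0;
    while (x264_encoder_delayed_frames(encoder) > 0) {
        if (x264_encoder_encode(encoder, &nal, &n_nal, NULL, pic_out) < 0)
            break;
        //nal[0..n_nal-1] would be packaged exactly like the normal frames above
    }
}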

Packaging the Encoded Data onto the Queue

After the YUV420 data has been encoded, the resulting NAL units can be parsed and added to the queue.

//SPS / PPS buffers
unsigned char sps[SPS_OUT_BUFFER_SIZE];
unsigned char pps[PPS_OUT_BUFFER_SIZE];
int sps_length, pps_length;
//reset
memset(sps, 0, SPS_OUT_BUFFER_SIZE);
memset(pps, 0, PPS_OUT_BUFFER_SIZE);

pic.i_pts += 1;//one frame per pts tick
for (int i = 0; i < n_nal; ++i) {
    if (nal[i].i_type == NAL_SPS) {
        //layout: 00 00 00 01 (start code); NAL type 7 (SPS); payload
        //do not copy the 4-byte start code: sps_length = payload length - 4
        sps_length = nal[i].i_payload - 4;
        //copy the SPS payload
        memcpy(sps, nal[i].p_payload + 4, sps_length);
    } else if (nal[i].i_type == NAL_PPS) {
        pps_length = nal[i].i_payload - 4;
        memcpy(pps, nal[i].p_payload + 4, pps_length);

        //send the video sequence header
        add_squence_header_to_rtmppacket(sps, pps, sps_length, pps_length);
    } else {
        //send the frame body
        add_frame_body_to_rtmppacket(nal[i].p_payload, nal[i].i_payload);
    }
}

Note the pic.i_pts += 1 line above. Recall that x264 was configured with the baseline profile; baseline has no B-frames, and without B-frames DTS and PTS are identical, so simply incrementing the PTS by one per frame means the stream is decoded and rendered frame by frame, in order, during playback. For profiles, I/P/B frames, and DTS vs. PTS, see Android RTMP Live Streaming: The H.264 Standard (Part 2).
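
With the timebase configured earlier (i_timebase_num = 1, i_timebase_den = fps), one PTS tick corresponds to exactly one frame interval, so converting a PTS value into milliseconds is simple arithmetic. A small illustrative sketch (this project actually stamps its RTMP packets with RTMP_GetTime() - start_time rather than the encoder PTS):

#include <stdint.h>

//Convert an x264 PTS (in timebase units) to milliseconds.
//With i_timebase_num = 1 and i_timebase_den = fps, one tick = one frame interval.
static int64_t pts_to_ms(int64_t pts, uint32_t timebase_num, uint32_t timebase_den) {
    return pts * 1000 * timebase_num / timebase_den; //e.g. pts = 30 at 30 fps -> 1000 ms
}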

Audio Encoding with FAAC

Similar to the video path, when streaming starts the AAC encoder configuration is initialized, then audio is captured on the Java side with AudioRecord and handed to the native layer for encoding.
AudioPusher:

//...
@Override
public void startPush() {
    isPushing = true;
    builder.getNativePush().setNativeAudioOptions(builder.getSampleRateInHz(), builder.getChannelConfig());
    mAudioThread = new Thread(new AudioPushTask());
    mAudioThread.start();
}
//...
private class AudioPushTask implements Runnable {

    @Override
    public void run() {
        audioRecord.startRecording();

        while (isPushing && audioRecord != null) {
            byte[] buffer = new byte[bufferSize];
            int len = audioRecord.read(buffer, 0, bufferSize);
            if (len > 0) {
                builder.getNativePush().sendAudio(buffer, 0, len);
            }
        }
    }
}

When streaming starts, setNativeAudioOptions initializes the AAC encoder configuration. Its C implementation:

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_setNativeAudioOptions(JNIEnv *env, jobject instance,
                                                           jint sampleRateInHz, jint channel) {
    faacEncodeHandle = faacEncOpen(sampleRateInHz, channel, &inputSamples, &maxOutputBytes);
    if (!faacEncodeHandle) {
        LOGE("%s", "FAAC encode open failed!");
        return;
    }
    faacEncConfigurationPtr faacEncodeConfigurationPtr = faacEncGetCurrentConfiguration(
            faacEncodeHandle);
    //MPEG version
    faacEncodeConfigurationPtr->mpegVersion = MPEG4;
    faacEncodeConfigurationPtr->allowMidside = 1;
    faacEncodeConfigurationPtr->aacObjectType = LOW;
    faacEncodeConfigurationPtr->outputFormat = 0; //output format: 0 = raw AAC, no ADTS header
    faacEncodeConfigurationPtr->useTns = 1; //temporal noise shaping; roughly, it reduces popping artifacts
    faacEncodeConfigurationPtr->useLfe = 0;
    faacEncodeConfigurationPtr->quantqual = 100;
    faacEncodeConfigurationPtr->bandWidth = 0; //bandwidth
    faacEncodeConfigurationPtr->shortctl = SHORTCTL_NORMAL;

    //apply the configuration
    if (!faacEncSetConfiguration(faacEncodeHandle, faacEncodeConfigurationPtr)) {
        LOGE("%s", "faacEncSetConfiguration failed!");
        return;
    }

    LOGI("%s", "faac initialization successful");
}

Compared with x264, FAAC needs far fewer settings. The main point is that the MPEG version must be specified when configuring FAAC. Once FAAC is configured, audio can be encoded; AudioPusher calls the native function sendAudio, whose implementation follows.
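
For context (a sketch of my own, not project code): the inputSamples and maxOutputBytes globals used by sendAudio below come straight out of faacEncOpen, which reports how many PCM samples one faacEncEncode call consumes and how large the output buffer must be.

#include "include/faac/faac.h" //same include path used elsewhere in this project

//Open a FAAC encoder and report its per-call input/output buffer requirements.
static faacEncHandle open_aac_encoder(unsigned long sample_rate, unsigned int channels,
                                      unsigned long *input_samples, unsigned long *max_output_bytes) {
    faacEncHandle handle = faacEncOpen(sample_rate, channels, input_samples, max_output_bytes);
    //With 16-bit PCM, one faacEncEncode() call consumes (*input_samples) * 2 bytes of audio.
    return handle;
}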

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_sendAudio(JNIEnv *env, jobject instance, jbyteArray audioData_,
                                               jint offsetInBytes, jint sizeInBytes) {
    jbyte *audioData = (*env)->GetByteArrayElements(env, audioData_, NULL);
    int *pcmbuf;
    unsigned char *bitbuf;
    pcmbuf = (int *) malloc(inputSamples * sizeof(int));
    bitbuf = (unsigned char *) malloc(maxOutputBytes * sizeof(unsigned char));
    int nByteCount = 0;
    unsigned int nBufferSize = (unsigned int) sizeInBytes / 2; //number of 16-bit samples
    unsigned short *buf = (unsigned short *) audioData;
    while (nByteCount < nBufferSize) {
        int audioLength = inputSamples;
        if ((nByteCount + inputSamples) >= nBufferSize) {
            audioLength = nBufferSize - nByteCount;
        }
        int i;
        for (i = 0; i < audioLength; i++) {
            //read one 16-bit PCM sample and store it, shifted left by 8 bits, in the encoder's 32-bit input buffer
            int s = ((int16_t *) buf + nByteCount)[i];
            pcmbuf[i] = s << 8;
        }
        nByteCount += inputSamples;
        //encode with FAAC: pcmbuf holds the converted PCM samples, audioLength is the number of
        //input samples, bitbuf receives the encoded data, maxOutputBytes came from faacEncOpen
        int byteslen = faacEncEncode(faacEncodeHandle, pcmbuf, audioLength,
                                     bitbuf, maxOutputBytes);
        if (byteslen < 1) {
            continue;
        }
        //bitbuf now holds the encoded AAC frame; put it on the packet queue
        add_audio_body_to_rtmppacket(bitbuf, byteslen);
    }
    (*env)->ReleaseByteArrayElements(env, audioData_, audioData, 0);
    if (bitbuf)
        free(bitbuf);
    if (pcmbuf)
        free(pcmbuf);
}

Pushing the Encoded Video Data over RTMP

The camera preview data has now been encoded; next, the encoded data must be wrapped in RTMP packets and sent. Going back to the video encoding block, there are calls to the following two functions:

//send the video sequence header
add_squence_header_to_rtmppacket(sps, pps, sps_length, pps_length);
//...

//send the frame body
add_frame_body_to_rtmppacket(nal[i].p_payload, nal[i].i_payload);

You might wonder why the packaging is split into two functions. Bear with me. As covered when the RTMP protocol was introduced earlier in this series, the structure of an RTMP Message is identified by its MessageType, and during transport a Message is split into Chunk Messages, which follow their own format constraints. On top of that, SPS and PPS play a special role in the H.264 standard, so we branch on the NAL unit type: if the NAL unit is an SPS or PPS it is packaged as a sequence header, otherwise it is packaged as an ordinary frame body.

/**
 * Build the video sequence header (AVC decoder configuration) and queue it as an RTMP packet
 * @param sps
 * @param pps
 * @param sps_length
 * @param pps_length
 */
void add_squence_header_to_rtmppacket(unsigned char *sps, unsigned char *pps, int sps_length,
                                      int pps_length) {
    //packet body size
    int size = sps_length + pps_length + 16;
    RTMPPacket *packet = malloc(sizeof(RTMPPacket));
    RTMPPacket_Alloc(packet, size);
    RTMPPacket_Reset(packet);

    //fill in the packet body
    char *body = packet->m_body;
    int i = 0;
    /**
     * (1) FrameType, 4 bits, frame type
         1 = key frame (for AVC, a seekable frame)
         2 = inter frame (for AVC, a non-seekable frame)
         3 = disposable inter frame (H.263 only)
         4 = generated key frame (reserved for server use only)
         5 = video info/command frame
         For H.264 this is usually 1 or 2.
       (2) CodecID, 4 bits, codec type
         1 = JPEG (currently unused)
         2 = Sorenson H.263
         3 = Screen video
         4 = On2 VP6
         5 = On2 VP6 with alpha channel
         6 = Screen video version 2
         7 = AVC
     */
    //first byte of the body
    body[i++] = 0x17; //(1)+(2), 4 bits each: key frame, AVC
    body[i++] = 0x00; //AVCPacketType = 0: AVC sequence header
    body[i++] = 0x00; //CompositionTime, 24 bits, 0 for the sequence header
    body[i++] = 0x00;
    body[i++] = 0x00;

    /*AVCDecoderConfigurationRecord*/
    body[i++] = 0x01;   //configurationVersion, always 1
    body[i++] = sps[1]; //AVCProfileIndication
    body[i++] = sps[2]; //profile_compatibility
    body[i++] = sps[3]; //AVCLevelIndication

    body[i++] = 0xFF;//lengthSizeMinusOne: the NALU length field size is 1 + (lengthSizeMinusOne & 3); in practice this is always 0xFF, i.e. 4 bytes

    /*sps*/
    body[i++] = 0xE1;//numOfSequenceParameterSets: SPS count = numOfSequenceParameterSets & 0x1F; in practice always 0xE1, i.e. 1
    body[i++] = (sps_length >> 8) & 0xff;//sequenceParameterSetLength: SPS length, high byte
    body[i++] = sps_length & 0xff;       //SPS length, low byte, followed by sequenceParameterSetNALUnits
    memcpy(&body[i], sps, sps_length);
    i += sps_length;

    /*pps*/
    body[i++] = 0x01;//numOfPictureParameterSets: PPS count = numOfPictureParameterSets & 0x1F, here 1
    body[i++] = (pps_length >> 8) & 0xff;//pictureParameterSetLength: PPS length, high byte
    body[i++] = (pps_length) & 0xff;     //PPS length, low byte, followed by the PPS data
    memcpy(&body[i], pps, pps_length);
    i += pps_length;

    //fill in the packet header
    packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
    packet->m_nBodySize = size;
    packet->m_nTimeStamp = 0;
    packet->m_hasAbsTimestamp = 0;
    packet->m_nChannel = 0x04;//channel used for audio/video data
    packet->m_headerType = RTMP_PACKET_SIZE_MEDIUM;

    add_rtmp_packet_queue(packet);
}

/**
 * Wrap one encoded NAL unit as an RTMP packet and queue it
 * @param frame
 * @param len
 */
void add_frame_body_to_rtmppacket(unsigned char *frame, int len) {
    //strip the Annex-B start code
    if (frame[2] == 0x00) { //00 00 00 01
        frame += 4;
        len -= 4;
    } else if (frame[2] == 0x01) { //00 00 01
        frame += 3;
        len -= 3;
    }
    RTMPPacket *packet = malloc(sizeof(RTMPPacket));
    int size = len + 9;
    RTMPPacket_Alloc(packet, size);
    RTMPPacket_Reset(packet);
    char *body = packet->m_body;
    /**
     * (1) FrameType, 4 bits, frame type
         1 = key frame (for AVC, a seekable frame)
         2 = inter frame (for AVC, a non-seekable frame)
         3 = disposable inter frame (H.263 only)
         4 = generated key frame (reserved for server use only)
         5 = video info/command frame
         For H.264 this is usually 1 or 2.
       (2) CodecID, 4 bits, codec type
         1 = JPEG (currently unused)
         2 = Sorenson H.263
         3 = Screen video
         4 = On2 VP6
         5 = On2 VP6 with alpha channel
         6 = Screen video version 2
         7 = AVC
     */
    //decide whether this NAL unit is a keyframe (IDR, intra-coded) or an ordinary frame (inter-coded)
    //===================nal-type==========
    //5  slice of an IDR picture; a keyframe can be decoded and rendered on its own
    //6  supplemental enhancement information (SEI)
    //7  SPS (Sequence Parameter Set, applies to a whole sequence of pictures)
    //8  PPS (Picture Parameter Set, applies to one or more pictures in the sequence)
    //===================nal-type==========
    //NAL header layout:
    //0             00           00000
    //forbidden bit, nal_ref_idc, nal-type
    //e.g. for an IDR slice: frame[0] & 0x1f = 00000101 & 00011111 = 5

    body[0] = 0x27;//inter frame (non-keyframe), AVC
    int type = frame[0] & 0x1f;
    if (type == NAL_SLICE_IDR) {
        //keyframe
        body[0] = 0x17;
    }
    body[1] = 0x01; /*AVCPacketType = 1: AVC NALU*/
    body[2] = 0x00; //composition time, 24 bits, 0x000000
    body[3] = 0x00;
    body[4] = 0x00;

    //4-byte big-endian NALU length
    body[5] = (len >> 24) & 0xff;
    body[6] = (len >> 16) & 0xff;
    body[7] = (len >> 8) & 0xff;
    body[8] = (len) & 0xff;

    /*copy data*/
    memcpy(&body[9], frame, len);

    packet->m_hasAbsTimestamp = 0;
    packet->m_nBodySize = size;
    packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;//packet type: video
    packet->m_nChannel = 0x04; //channel used for audio/video data
    packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
    packet->m_nTimeStamp = RTMP_GetTime() - start_time;//timestamp of this tag relative to the start of the stream
    add_rtmp_packet_queue(packet);
}

Pushing the Encoded Audio Data over RTMP
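
The audio path mirrors the video path; its implementation is in the complete listing below. add_audio_squence_header_to_rtmppacket sends the AAC sequence header once (built from the AudioSpecificConfig returned by faacEncGetDecoderSpecificInfo), while add_audio_body_to_rtmppacket wraps every encoded AAC frame. Both start the packet body with the same two-byte FLV audio tag header; the helper below is a hedged sketch of just that header (the is_sequence_header flag is my own illustration, not project code):

//Write the two-byte FLV audio tag header used by both audio packet functions.
//is_sequence_header: 1 for the one-time AAC sequence header, 0 for raw AAC frames.
static void write_flv_aac_header(char *body, int is_sequence_header) {
    //0xAF: SoundFormat = 10 (AAC), SoundRate = 3 (44 kHz), SoundSize = 1 (16-bit), SoundType = 1 (stereo)
    body[0] = (char) 0xAF;
    //AACPacketType: 0 = AAC sequence header, 1 = AAC raw
    body[1] = is_sequence_header ? 0x00 : 0x01;
}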

Complete Native Code

//@author zhangchuan622@gmail.com
#include <jni.h>
#include <android/log.h>
#include <stdlib.h>
#include <string.h> //memcpy / memset
#include "include/x264/x264.h"
#include "include/rtmp/rtmp.h"
#include "include/queue.h"
#include <pthread.h>
#include "include/faac/faac.h"

#define PRINT_TAG "bNativeLive"
#define PUSH_URL "rtmp://39.105.76.133:9510/live/benlive"

#define SPS_OUT_BUFFER_SIZE 100
#define PPS_OUT_BUFFER_SIZE 100
#define LOGI(FORMAT, ...) __android_log_print(ANDROID_LOG_VERBOSE,PRINT_TAG,FORMAT,__VA_ARGS__)
#define LOGE(FORMAT, ...) __android_log_print(ANDROID_LOG_ERROR,PRINT_TAG,FORMAT,__VA_ARGS__)

x264_param_t param;
x264_picture_t pic;
x264_picture_t pic_out;
x264_t *x264_encoder;

pthread_t push_thread;
pthread_mutex_t push_thread_mutex;
pthread_cond_t push_cond;

//YUV plane lengths
int y_len, u_len, v_len;
int start_time;

int is_pushing = FALSE;

//aac
unsigned long inputSamples;
unsigned long maxOutputBytes;
faacEncHandle faacEncodeHandle;

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_setNativeVideoOptions(JNIEnv *env, jobject instance,
jint width, jint height, jint bitrate,
jint fps) {
LOGI("%s", "setNativeVideoOptions...");

//0延迟
x264_param_default_preset(&param, "ultrafast", "zerolatency");
param.i_csp = X264_CSP_I420;
param.i_width = width;
param.i_height = height;

//设置yuv长度
y_len = width * height;
u_len = y_len / 4;
v_len = u_len;

//码率控制,CQP(恒定质量),CRF(恒定码率),ABR(平均码率)
param.rc.i_rc_method = X264_RC_CRF;
//码率 单位(Kbps)
param.rc.i_bitrate = bitrate / 1000;
//瞬时最大码率
param.rc.i_vbv_max_bitrate = bitrate / 1000 * 1.2;
//通过fps控制码率,
param.b_vfr_input = 0;
//帧率分子
param.i_fps_num = fps;
//帧率分母
param.i_fps_den = 1;
param.i_timebase_den = param.i_fps_num;
param.i_timebase_num = param.i_fps_den;
//是否把SPS PPS放入每个关键帧,提高纠错能力
param.b_repeat_headers = 1;
//设置level级别,5.1
param.i_level_idc = 51;

//设置档次
x264_param_apply_profile(&param, "baseline");

x264_picture_alloc(&pic, param.i_csp, param.i_width, param.i_height);
x264_encoder = x264_encoder_open(&param);
if (x264_encoder) {
LOGI("initVideoOptions:%s", "success");
} else {
LOGE("initVideoOptions:%s", "failed");
}

}

/**
* Faac初始化
* Call faacEncOpen() for every encoder instance you need.
*To set encoder options, call faacEncGetCurrentConfiguration(), change the parameters in the structure accessible by the returned pointer and then call faacEncSetConfiguration().
*As long as there are still samples left to encode, call faacEncEncode() to encode the data. The encoder returns the bitstream data in a client-supplied buffer.
*Once you call faacEncEncode() with zero samples of input the flushing process is initiated; afterwards you may call faacEncEncode() with zero samples input only.
*faacEncEncode() will continue to write out data until all audio samples have been encoded.
*Once faacEncEncode() has returned with zero bytes written, call faacEncClose() to destroy this encoder instance.
* @param env
* @param instance
* @param sampleRateInHz
* @param channel
*/
JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_setNativeAudioOptions(JNIEnv *env, jobject instance,
jint sampleRateInHz, jint channel) {
faacEncodeHandle = faacEncOpen(sampleRateInHz, channel, &inputSamples, &maxOutputBytes);
if (!faacEncodeHandle) {
LOGE("%s", "FAAC encode open failed!");
return;
}
faacEncConfigurationPtr faacEncodeConfigurationPtr = faacEncGetCurrentConfiguration(
faacEncodeHandle);
//指定MPEG版本
faacEncodeConfigurationPtr->mpegVersion = MPEG4;
faacEncodeConfigurationPtr->allowMidside = 1;
faacEncodeConfigurationPtr->aacObjectType = LOW;
faacEncodeConfigurationPtr->outputFormat = 0; //输出是否包含ADTS头
faacEncodeConfigurationPtr->useTns = 1; //时域噪音控制,大概就是消爆音
faacEncodeConfigurationPtr->useLfe = 0;
faacEncodeConfigurationPtr->quantqual = 100;
faacEncodeConfigurationPtr->bandWidth = 0; //频宽
faacEncodeConfigurationPtr->shortctl = SHORTCTL_NORMAL;

//call faacEncSetConfiguration
if (!faacEncSetConfiguration(faacEncodeHandle, faacEncodeConfigurationPtr)) {
LOGE("%s", "faacEncSetConfiguration failed!");
return;
}

LOGI("%s", "faac initialization successful");
}


/**
* Append a packet to the send queue
* @param packet
*/
void add_rtmp_packet_queue(RTMPPacket *packet) {
//lock
pthread_mutex_lock(&push_thread_mutex);
if (is_pushing) {
queue_append_last(packet);
}
//signal
pthread_cond_signal(&push_cond);
//unlock
pthread_mutex_unlock(&push_thread_mutex);
}


/**
* 添加视频序列消息头至rtmppacket中
* @param sps
* @param pps
* @param sps_length
* @param pps_length
*/
void add_squence_header_to_rtmppacket(unsigned char *sps, unsigned char *pps, int sps_length,
int pps_length) {
//packet内容大小
int size = sps_length + pps_length + 16;
RTMPPacket *packet = malloc(sizeof(RTMPPacket));
RTMPPacket_Alloc(packet, size);
RTMPPacket_Reset(packet);

//设置packet中的body信息
char *body = packet->m_body;
int i = 0;
/**
* (1) FrameType,4bit,帧类型
1 = key frame (for AVC, a seekable frame)
2 = inter frame (for AVC, a non-seekable frame)
3 = disposable inter frame (H.263 only)
4 = generated key frame (reserved for server use only)
5 = video info/command frame
H264的一般为1或者2.
(2)CodecID ,4bit,编码类型
1 = JPEG(currently unused)
2 = Sorenson H.263
3 = Screen video
4 = On2 VP6
5 = On2 VP6 with alpha channel
6 = Screen video version 2
7 = AVC
*/
//body 第一位
body[i++] = 0x17; //(1)-(2)4bit*2关键帧,帧内压缩
body[i++] = 0x00; //(3)8bit
body[i++] = 0x00; //(4)8bit
body[i++] = 0x00; //(5)8bit
body[i++] = 0x00; //(6)8bit

/*AVCDecoderConfigurationRecord*/
body[i++] = 0x01;//configurationVersion,版本为1
body[i++] = sps[1];//AVCProfileIndication
body[i++] = sps[2];//profile_compatibility
body[i++] = sps[3];//AVCLevelIndication

body[i++] = 0xFF;//lengthSizeMinusOne,H264 视频中 NALU的长度,计算方法是 1 + (lengthSizeMinusOne & 3),实际测试时发现总为FF,计算结果为4.


/*sps*/
body[i++] = 0xE1;//numOfSequenceParameterSets:SPS的个数,计算方法是 numOfSequenceParameterSets & 0x1F,实际测试时发现总为E1,计算结果为1.
body[i++] = (sps_length >> 8) & 0xff;//sequenceParameterSetLength:SPS的长度
body[i++] = sps_length & 0xff;//sequenceParameterSetNALUnits
memcpy(&body[i], sps, sps_length);
i += sps_length;

/*pps*/
body[i++] = 0x01;//numOfPictureParameterSets:PPS 的个数,计算方法是 numOfPictureParameterSets & 0x1F,实际测试时发现总为E1,计算结果为1.
body[i++] = (pps_length >> 8) & 0xff;//pictureParameterSetLength:PPS的长度
body[i++] = (pps_length) & 0xff;//PPS
memcpy(&body[i], pps, pps_length);
i += pps_length;

//设置packet头信息
packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
packet->m_nBodySize = size;
packet->m_nTimeStamp = 0;
packet->m_hasAbsTimestamp = 0;
packet->m_nChannel = 0x04;//Audio和Video通道
packet->m_headerType = RTMP_PACKET_SIZE_MEDIUM;


add_rtmp_packet_queue(packet);

}


/**
* 添加帧信息至rtmppacket
* @param frame
* @param length
*/
void add_frame_body_to_rtmppacket(unsigned char *frame, int len) {
//去掉起始码四字节
if (frame[2] == 0x00) { //00 00 00 01
frame += 4;
len -= 4;
} else if (frame[2] == 0x01) { // 00 00 01
frame += 3;
len -= 3;
}
RTMPPacket *packet = malloc(sizeof(RTMPPacket));
int size = len + 9;
RTMPPacket_Alloc(packet, size);
RTMPPacket_Reset(packet);
char *body = packet->m_body;
/**
* (1) FrameType,4bit,帧类型
1 = key frame (for AVC, a seekable frame)
2 = inter frame (for AVC, a non-seekable frame)
3 = disposable inter frame (H.263 only)
4 = generated key frame (reserved for server use only)
5 = video info/command frame
H264的一般为1或者2.
(2)CodecID ,4bit,编码类型
1 = JPEG(currently unused)
2 = Sorenson H.263
3 = Screen video
4 = On2 VP6
5 = On2 VP6 with alpha channel
6 = Screen video version 2
7 = AVC
*/
//判断当前nalutype是关键帧I(帧内压缩)还是普通帧P(帧间压缩)
//===================nal-type==========
//5 IDR图像中的片 关键帧可以直接解压渲染
//6 补充增强信息单元(SEI)
//7 SPS(Sequence Parameter Set序列参数集,作用于一串连续的视频图像,即视频序列)
//8 PPS(Picture Parameter Set图像参数集,作用于视频序列中的一个或多个图像
//===================nal-type==========
//nal组成
//0 00 00000
//禁止位 重要程度 nal-type
//frame[0] = 5 = 00000101
//00000101
//&
//00000111
//00000101

body[0] = 0x27;//非关键帧 帧间压缩
int type = frame[0] & 0x1f;
if (type == NAL_SLICE_IDR) {
//关键帧
body[0] = 0x17;
}
body[1] = 0x01; /*nal unit,NALUs(AVCPacketType == 1)*/
body[2] = 0x00; //composition time 0x000000 24bit
body[3] = 0x00;
body[4] = 0x00;

//写入NALU信息,右移8位,一个字节的读取?
body[5] = (len >> 24) & 0xff;
body[6] = (len >> 16) & 0xff;
body[7] = (len >> 8) & 0xff;
body[8] = (len) & 0xff;

/*copy data*/
memcpy(&body[9], frame, len);

packet->m_hasAbsTimestamp = 0;
packet->m_nBodySize = size;
packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;//当前packet的类型:Video
packet->m_nChannel = 0x04; //Audio和Video通道
packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
packet->m_nTimeStamp = RTMP_GetTime() - start_time;//记录了每一个tag相对于第一个tag(File Header)的相对时间
add_rtmp_packet_queue(packet);
}

/**
* @param env
* @param instance
* @param data_
*/
JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_sendVideo(JNIEnv *env, jobject instance, jbyteArray data_) {
jbyte *data = (*env)->GetByteArrayElements(env, data_, NULL);
//将NV21格式数据转换为YUV420
//NV21转YUV420p的公式:(Y不变)Y=Y,U=Y+1+1,V=Y+1
jbyte *y = pic.img.plane[0];
jbyte *u = pic.img.plane[1];
jbyte *v = pic.img.plane[2];
//设置y
memcpy(y, data, y_len);
//设置u,v
for (int i = 0; i < u_len; ++i) {
*(u + i) = *(data + y_len + i * 2 + 1);
*(v + i) = *(data + y_len + i * 2);
}

//使用x264编码
x264_nal_t *nal = NULL;
int n_nal = -1;
if (x264_encoder_encode(x264_encoder, &nal, &n_nal, &pic, &pic_out) < 0) {
LOGE("%s", "x264 encode error");
(*env)->ReleaseByteArrayElements(env, data_, data, 0); //avoid leaking the array on the error path
return;
}

//设置SPS PPS
unsigned char sps[SPS_OUT_BUFFER_SIZE];
unsigned char pps[PPS_OUT_BUFFER_SIZE];
int sps_length, pps_length;
//reset
memset(sps, 0, SPS_OUT_BUFFER_SIZE);
memset(pps, 0, PPS_OUT_BUFFER_SIZE);

pic.i_pts += 1; //顺序累加
for (int i = 0; i < n_nal; ++i) {
if (nal[i].i_type == NAL_SPS) {
//00 00 00 01;07;payload
//不复制四字节起始码,设置sps_length的长度为总长度-四字节起始码长度
sps_length = nal[i].i_payload - 4;
//复制sps数据
memcpy(sps, nal[i].p_payload + 4, sps_length);
} else if (nal[i].i_type == NAL_PPS) {
pps_length = nal[i].i_payload - 4;
memcpy(pps, nal[i].p_payload + 4, pps_length);

//发送视频序列消息
add_squence_header_to_rtmppacket(sps, pps, sps_length, pps_length);
} else {
//发送帧信息
add_frame_body_to_rtmppacket(nal[i].p_payload, nal[i].i_payload);
}

}


(*env)->ReleaseByteArrayElements(env, data_, data, 0);
}


/**
* Send the audio sequence header
* (the audio header message is sent only once)
*/
void add_audio_squence_header_to_rtmppacket() {
unsigned char *ppBuffer;
unsigned long pSizeOfDecoderSpecificInfo;
faacEncGetDecoderSpecificInfo(faacEncodeHandle, &ppBuffer, &pSizeOfDecoderSpecificInfo);
//the AAC tag header occupies 2 bytes
int size = pSizeOfDecoderSpecificInfo + 2;
RTMPPacket *packet = malloc(sizeof(RTMPPacket));
RTMPPacket_Alloc(packet, size);
RTMPPacket_Reset(packet);
//fill in the packet body
char *body = packet->m_body;

/**
* 1、SoundFormat,4bit
0 = Linear PCM, platform endian
1 = ADPCM
2 = MP3
3 = Linear PCM, little endian
4 = Nellymoser 16 kHz mono
5 = Nellymoser 8 kHz mono
6 = Nellymoser
7 = G.711 A-law logarithmic PCM
8 = G.711 mu-law logarithmic PCM
9 = reserved
10 = AAC
11 = Speex
14 = MP3 8 kHz
15 = Device-specific sound
2. SoundRate, 2 bits, sampling rate
0 = 5.5 kHz
1 = 11 kHz
2 = 22 kHz
3 = 44 kHz
For AAC this is always binary 11, i.e. 44 kHz.
3. SoundSize, 1 bit, sample size
0 = 8-bit samples
1 = 16-bit samples
For AAC this is always 1, i.e. 16-bit samples.
4. SoundType, 1 bit, channel count
0 = Mono sound
1 = Stereo sound
*/
//0xAF: SoundFormat = 10 (AAC), SoundRate = 3 (44 kHz), SoundSize = 1 (16-bit), SoundType = 1 (stereo)
body[0] = 0xAF;
body[1] = 0x00;
//copy audio data
memcpy(&body[2], ppBuffer, pSizeOfDecoderSpecificInfo);

//set packet header fields
packet->m_hasAbsTimestamp = 0;
packet->m_nBodySize = size;
packet->m_packetType = RTMP_PACKET_TYPE_AUDIO;
packet->m_nChannel = 0x04; //channel used for audio/video data
packet->m_headerType = RTMP_PACKET_SIZE_MEDIUM;
packet->m_nTimeStamp = RTMP_GetTime() - start_time;//timestamp of this tag relative to the start of the stream
add_rtmp_packet_queue(packet);

}

/**
* Wrap AAC-encoded audio data in an RTMP packet and queue it
* @param bitbuf
* @param byteslen
*/
void add_audio_body_to_rtmppacket(unsigned char *bitbuf, int byteslen) {
//the AAC tag header occupies 2 bytes
int size = byteslen + 2;
RTMPPacket *packet = malloc(sizeof(RTMPPacket));
RTMPPacket_Alloc(packet, size);
RTMPPacket_Reset(packet);
//fill in the packet body
char *body = packet->m_body;

/**
* 1、SoundFormat,4bit
0 = Linear PCM, platform endian
1 = ADPCM
2 = MP3
3 = Linear PCM, little endian
4 = Nellymoser 16 kHz mono
5 = Nellymoser 8 kHz mono
6 = Nellymoser
7 = G.711 A-law logarithmic PCM
8 = G.711 mu-law logarithmic PCM
9 = reserved
10 = AAC
11 = Speex
14 = MP3 8 kHz
15 = Device-specific sound
2. SoundRate, 2 bits, sampling rate
0 = 5.5 kHz
1 = 11 kHz
2 = 22 kHz
3 = 44 kHz
For AAC this is always binary 11, i.e. 44 kHz.
3. SoundSize, 1 bit, sample size
0 = 8-bit samples
1 = 16-bit samples
For AAC this is always 1, i.e. 16-bit samples.
4. SoundType, 1 bit, channel count
0 = Mono sound
1 = Stereo sound
5. AACPacketType, 8 bits
Indicates the type of AACAUDIODATA: 0 = AAC sequence header, 1 = AAC raw. The first audio packet uses 0, all subsequent packets use 1.
*/
//0xAF: SoundFormat = 10 (AAC), SoundRate = 3 (44 kHz), SoundSize = 1 (16-bit), SoundType = 1 (stereo)
body[0] = 0xAF;
body[1] = 0x01;
//copy audio data
memcpy(&body[2], bitbuf, byteslen);

//set packet header fields
packet->m_hasAbsTimestamp = 0;
packet->m_nBodySize = size;
packet->m_packetType = RTMP_PACKET_TYPE_AUDIO;
packet->m_nChannel = 0x04; //channel used for audio/video data
packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
packet->m_nTimeStamp = RTMP_GetTime() - start_time;//timestamp of this tag relative to the start of the stream
add_rtmp_packet_queue(packet);
}

/**
* 使用AAC进行音频编码
* @param env
* @param instance
* @param audioData_
* @param offsetInBytes
* @param sizeInBytes
*/
JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_sendAudio(JNIEnv *env, jobject instance, jbyteArray audioData_,
jint offsetInBytes, jint sizeInBytes) {
jbyte *audioData = (*env)->GetByteArrayElements(env, audioData_, NULL);
int *pcmbuf;
unsigned char *bitbuf;
pcmbuf = (int *) malloc(inputSamples * sizeof(int));
bitbuf = (unsigned char *) malloc(maxOutputBytes * sizeof(unsigned char));
int nByteCount = 0;
unsigned int nBufferSize = (unsigned int) sizeInBytes / 2;
unsigned short *buf = (unsigned short *) audioData;
while (nByteCount < nBufferSize) {
int audioLength = inputSamples;
if ((nByteCount + inputSamples) >= nBufferSize) {
audioLength = nBufferSize - nByteCount;
}
int i;
for (i = 0; i < audioLength; i++) {//每次从实时的pcm音频队列中读出量化位数为8的pcm数据。
int s = ((int16_t *) buf + nByteCount)[i];
pcmbuf[i] = s << 8;//用8个二进制位来表示一个采样量化点(模数转换)
}
nByteCount += inputSamples;
//利用FAAC进行编码,pcmbuf为转换后的pcm流数据,audioLength为调用faacEncOpen时得到的输入采样数,bitbuf为编码后的数据buff,nMaxOutputBytes为调用faacEncOpen时得到的最大输出字节数
int byteslen = faacEncEncode(faacEncodeHandle, pcmbuf, audioLength,
bitbuf, maxOutputBytes);
if (byteslen < 1) {
continue;
}
add_audio_body_to_rtmppacket(bitbuf, byteslen);//从bitbuf中得到编码后的aac数据流,放到数据队列
}
(*env)->ReleaseByteArrayElements(env, audioData_, audioData, 0);
if (bitbuf)
free(bitbuf);
if (pcmbuf)
free(pcmbuf);


}

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_prepare(JNIEnv *env, jobject instance) {

// TODO

}

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_stopPush(JNIEnv *env, jobject instance) {
is_pushing = FALSE;
// TODO

}

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_free(JNIEnv *env, jobject instance) {

// TODO

}

/**
* Read packets from the queue and send them with RTMP
* @param arg
* @return
*/
void *push_thread_func(void *arg) {
RTMP *rtmp = RTMP_Alloc();
RTMP_Init(rtmp);
//connection timeout in seconds
rtmp->Link.timeout = 10;
RTMP_SetupURL(rtmp, PUSH_URL);
//enable publishing (we will be writing data)
RTMP_EnableWrite(rtmp);
//establish the connection
if (!RTMP_Connect(rtmp, NULL)) {
LOGE("connect [%s] result:%s", PUSH_URL, "connection failed!");
goto end;
} else {
LOGE("connect [%s] result:%s", PUSH_URL, "successful");
}
start_time = RTMP_GetTime();
if (!RTMP_ConnectStream(rtmp, 0)) {
LOGE("connect [%s] result:%s", PUSH_URL, "RTMP_ConnectStream failed!");
goto end;
}
is_pushing = TRUE;
//send audio header packet
add_audio_squence_header_to_rtmppacket();
//send
while (is_pushing) {
pthread_mutex_lock(&push_thread_mutex);
pthread_cond_wait(&push_cond, &push_thread_mutex);
//take the first packet from the queue
RTMPPacket *packet = queue_get_first();
if (packet) {
//remove it from the queue
queue_delete_first();
packet->m_nInfoField2 = rtmp->m_stream_id; //stream id required by the RTMP protocol
int i = RTMP_SendPacket(rtmp, packet, TRUE); //TRUE: queue inside librtmp rather than sending immediately
if (!i) {
RTMPPacket_Free(packet);
pthread_mutex_unlock(&push_thread_mutex);
goto end;
} else {
LOGI("%s", "rtmp send packet");
}
RTMPPacket_Free(packet);
}
pthread_mutex_unlock(&push_thread_mutex);
}


end:
RTMP_Close(rtmp);
RTMP_Free(rtmp);
return 0;
}
/**
* Called when streaming starts
* @param env
* @param instance
*/
JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_startPush(JNIEnv *env, jobject instance) {
//create the packet queue
create_queue();
//initialize the mutex/condition and start the push thread
pthread_mutex_init(&push_thread_mutex, NULL);
pthread_cond_init(&push_cond, NULL);
pthread_create(&push_thread, NULL, push_thread_func, NULL);

}

JNIEXPORT void JNICALL
Java_com_ben_android_live_NativePush_pausePush(JNIEnv *env, jobject instance) {

// TODO

}