-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathDataSink.cpp
executable file
·164 lines (137 loc) · 5.72 KB
/
DataSink.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
/*
* DataSink.cpp
*
* Created on: Dec 23, 2014
* Author: rayan
*/
#include "DataSink.h"
#include "SPropParameterSetParser.h"

#include <cstdio> // snprintf in the debug-print path
#include <memory>
#define DATA_SINK_VIDEO_RECEIVE_BUFFER_SIZE 1048576 // 1024KB (for TCP used), ffmpeg has a socket buffer default of 64KB (UDP)
// H.264 NAL unit type codes (the low 5 bits of the first payload byte,
// extracted in afterGettingFrame() with `& 0x1f`).
enum FrameFormat {
SPS_Data = 0x7, // sequence parameter set
PPS_Data = 0x8, // picture parameter set
I_Frame = 0x5, // IDR slice (key frame)
P_Frame = 0x1, // non-IDR slice (predicted frame)
};
// Annex-B start code prepended to every frame handed downstream.
static const size_t startCodeLength = 4;
static const char startCode[startCodeLength] = {0x00, 0x00, 0x00, 0x01};
/// Allocate a FrameInfo header plus `frame_size` payload bytes in ONE malloc()
/// block; `pdata` points just past the header, so the whole frame is released
/// with a single free() by the consumer.
///
/// @param frame_size  payload capacity in bytes (also stored in FrameLen)
/// @return pointer to the new frame, or nullptr on failure
inline FrameInfo* FrameNew(int frame_size = 4096)
{
    // FIX: reject negative sizes — the signed parameter previously wrapped to
    // a huge size_t inside the malloc() argument.
    if (frame_size < 0)
        return nullptr;
    FrameInfo* frame =
        static_cast<FrameInfo*>(malloc(sizeof(FrameInfo) + frame_size));
    if (frame == nullptr)
        return nullptr;
    // Payload lives immediately after the header within the same allocation.
    frame->pdata = reinterpret_cast<char*>(frame) + sizeof(FrameInfo);
    frame->frameHead.FrameLen = frame_size;
    return frame;
}
// Factory following the live555 convention: sinks are heap-allocated and
// later torn down through the Medium machinery.
DataSink* DataSink::createNew(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) {
    DataSink* sink = new DataSink(env, subsession, streamId);
    return sink;
}
// Build a sink for one RTSP subsession: allocate the receive buffer, record
// stream/media metadata, and (for H.264) convert the SDP
// sprop-parameter-sets into Annex-B formatted SPS/PPS extradata.
DataSink::DataSink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
: MediaSink(env),
m_Subsession(subsession) {
// Keep our own copy of the id; released in the destructor.
m_pStreamId = strDup(streamId);
m_pReceiveBuffer = new unsigned char[DATA_SINK_VIDEO_RECEIVE_BUFFER_SIZE];
// Zeroing also sets extraSPS_Len = 0 before the append loop below.
memset(&m_mediainfo, 0, sizeof(MediaInfo));
// Base64 sprop-parameter-sets string from the SDP description.
m_sps = m_Subsession.fmtp_spropparametersets();
if( !strcmp( m_Subsession.mediumName(), "video" ) ) {
m_mediainfo.codecType = CODEC_TYPE_VIDEO;
// 'undf' until a recognized codec is matched below.
m_mediainfo.i_format = FOURCC('u','n','d','f');
m_mediainfo.video.fps = m_Subsession.videoFPS();
m_mediainfo.video.height = m_Subsession.videoHeight();
m_mediainfo.video.width = m_Subsession.videoWidth();
// Now, just H264
if( !strcmp( m_Subsession.codecName(), "H264" ) ) {
unsigned int num = 0;
// parseSPropParameterSets() returns a new[]'d array; unique_ptr<[]> frees it.
std::unique_ptr<SPropRecord[]> pSpsRecord(
parseSPropParameterSets(m_sps, num));
// Total size of all parameter sets, each prefixed with a 4-byte start code.
int extraSpsSize = 0;
for (unsigned int i = 0; i < num; ++i)
extraSpsSize += (startCodeLength + pSpsRecord[i].sPropLength);
// NOTE(review): the +32 appears to be headroom beyond the computed size —
// confirm whether any consumer relies on it.
m_mediainfo.extraSPS = new char[extraSpsSize + 32];
// Lay out: startcode + SPS/PPS bytes, repeated per record.
for (unsigned int i = 0; i < num; ++i) {
memcpy(&m_mediainfo.extraSPS[m_mediainfo.extraSPS_Len],
startCode, startCodeLength);
m_mediainfo.extraSPS_Len += startCodeLength;
memcpy(&m_mediainfo.extraSPS[m_mediainfo.extraSPS_Len],
pSpsRecord[i].sPropBytes, pSpsRecord[i].sPropLength);
m_mediainfo.extraSPS_Len += pSpsRecord[i].sPropLength;
}
m_mediainfo.i_format = FOURCC('h', '2', '6', '4');
m_mediainfo.b_packetized = false;
// Prefer dimensions decoded from the SPS over the SDP-advertised ones.
CSPropParameterSetParser spsParser(m_sps);
m_mediainfo.video.height = spsParser.GetHeight();
m_mediainfo.video.width = spsParser.GetWidth();
}
}
}
// Tear down everything the constructor allocated: the SPS/PPS extradata, the
// duplicated stream id, and the network receive buffer. safeArrayDelete is
// assumed to be null-safe, so unconditional calls are fine.
DataSink::~DataSink() {
    safeArrayDelete(m_mediainfo.extraSPS);
    safeArrayDelete(m_pStreamId);
    safeArrayDelete(m_pReceiveBuffer);
}
// live555 callback trampoline (clientData is the DataSink registered in
// continuePlaying()). For H.264, once the first key frame has been seen,
// wraps each I/P slice in an Annex-B start code and queues it for consumers,
// then forwards to the member handler to request the next frame.
void DataSink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
        struct timeval presentationTime, unsigned durationInMicroseconds) {
    DataSink* sink = (DataSink*)clientData;
    if( frameSize > DATA_SINK_VIDEO_RECEIVE_BUFFER_SIZE ) {
        // getNextFrame() is bounded by the buffer size, so hitting this would
        // indicate a logic error upstream.
        ;//err("buffer overflow" );
    }
    do {
        if (sink->m_mediainfo.i_format != FOURCC('h', '2', '6', '4'))
            break;
        // BUG(review): this flag is function-static and therefore shared by
        // ALL DataSink instances — with multiple streams, the first I-frame
        // of any stream unblocks every stream. It should be a per-instance
        // member; left as-is here because the class header is not in view.
        static bool hasRecvFirst_I_Frame = false;
        // H.264 NAL unit type lives in the low 5 bits of the first byte.
        unsigned char type = (sink->m_pReceiveBuffer[0] & 0x1f);
        if (!hasRecvFirst_I_Frame && I_Frame == type)
            hasRecvFirst_I_Frame = true;
        // Drop everything until the first key frame, then pass I and P slices.
        if (hasRecvFirst_I_Frame && (I_Frame == type || P_Frame == type)) {
            FrameInfo* p_block = FrameNew(frameSize + startCodeLength);
            // FIX: FrameNew() returns NULL on allocation failure; the original
            // dereferenced the result unconditionally.
            if (p_block == NULL)
                break;
            memcpy(p_block->pdata, startCode, startCodeLength);
            memcpy(&p_block->pdata[startCodeLength], sink->m_pReceiveBuffer,
                    frameSize);
            // Millisecond PTS. FIX: widen tv_sec before the multiply so
            // tv_sec * 1000 cannot overflow a 32-bit long.
            unsigned long i_pts = (unsigned long)presentationTime.tv_sec * 1000
                    + presentationTime.tv_usec / 1000;
            p_block->frameHead.FrameType =
                    (long) (sink->m_mediainfo.codecType);
            p_block->frameHead.TimeStamp = i_pts;
            p_block->frameHead.frameDuration = durationInMicroseconds;
            // Queue takes ownership of p_block.
            sink->m_frameQueue.put(p_block);
        }
    } while(false);
    sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}
// If you don't want to see debugging output for each received frame, then comment out the following line:
//#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
// Per-frame member handler: optionally prints diagnostics about the frame
// just received, then re-arms the source by calling continuePlaying().
// durationInMicroseconds is unused here (it is consumed by the static
// trampoline before delegating).
void DataSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
        struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
    // We've just received a frame of data. (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
    if (m_pStreamId != NULL)
        envir() << "Stream \"" << m_pStreamId << "\"; ";
    envir() << m_Subsession.mediumName() << "/" << m_Subsession.codecName() << ":\tReceived " << frameSize << " bytes";
    if (numTruncatedBytes > 0)
        envir() << " (with " << numTruncatedBytes << " bytes truncated)";
    char uSecsStr[6+1]; // six-digit microseconds field + NUL
    // FIX: snprintf instead of sprintf — bounded even if tv_usec is out of range.
    snprintf(uSecsStr, sizeof uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
    envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
    if (m_Subsession.rtpSource() != NULL && !m_Subsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
        envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
    }
#ifdef DEBUG_PRINT_NPT
    // FIX: was "fSubsession" — a leftover from live555's testRTSPClient that
    // does not exist in this class and broke the build when DEBUG_PRINT_NPT
    // was defined.
    envir() << "\tNPT: " << m_Subsession.getNormalPlayTime(presentationTime);
#endif
    envir() << "\n";
#endif
    // Then continue, to request the next frame of data:
    continuePlaying();
}
// Arm (or re-arm) frame delivery: ask the upstream source to fill our receive
// buffer. The static afterGettingFrame() fires on arrival; onSourceClosure
// fires when the stream ends. Returns False only if no source is attached.
Boolean DataSink::continuePlaying() {
    if (fSource == NULL) return False; // sanity check (should not happen)
    fSource->getNextFrame(m_pReceiveBuffer,
                          DATA_SINK_VIDEO_RECEIVE_BUFFER_SIZE,
                          afterGettingFrame, this,
                          onSourceClosure, this);
    return True;
}