C#: how to generate an MP4 file from multiple JPG files

Need help: how do I convert JPG images into RMVB or MP4 format in C#? Waiting online!!!
[Points: 60, accepted answer: Ryan]
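The replies below center on driving FFmpeg from C#, but none of them show the image-sequence step itself. A minimal sketch of that step (my addition, not from the thread; the ffmpeg.exe location, the img%03d.jpg naming pattern and the 25 fps rate are all assumptions):

C# code
using System;
using System.Diagnostics;

class JpgToMp4
{
    static void Main()
    {
        ProcessStartInfo psi = new ProcessStartInfo();
        psi.FileName = "ffmpeg.exe";  // assumed: ffmpeg.exe sits next to the executable
        // image2 demuxer: %03d matches img001.jpg, img002.jpg, ...
        // -framerate sets the input frame rate; -pix_fmt yuv420p keeps the
        // output playable in common players.
        psi.Arguments = "-y -framerate 25 -i img%03d.jpg -c:v libx264 -pix_fmt yuv420p out.mp4";
        psi.UseShellExecute = false;
        psi.RedirectStandardError = true;  // ffmpeg writes its log to stderr
        using (Process p = Process.Start(psi))
        {
            Console.WriteLine(p.StandardError.ReadToEnd());
            p.WaitForExit();
        }
    }
}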
How can C# get the duration of an MP4 file?
I want to read the duration of an MP4 file in C#. I have tried all kinds of approaches; under Win7 they all work, but under XP I just cannot get the value. Please advise.
------ Solution --------------------
Read the video information with ffmpeg -i.
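For reference, the report that ffmpeg -i writes to stderr contains a line of this shape (values hypothetical):

    Duration: 00:01:23.45, start: 0.000000, bitrate: 1205 kb/s

The parsing code further down extracts exactly this timestamp.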
------ Solution --------------------
This is an FFmpeg wrapper class used in my project... C# code
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel;
using System.Runtime.CompilerServices;
using System.IO;
using System.Web;
namespace AUV5.Common
{
    public class FormatConverter
    {
        // FFmpeg configuration
        private string ffmpegpath = "/FFmpeg/ffmpeg.exe"; // server-relative path of FFmpeg
        private string imgsize = "400*300";   // size of video snapshots
        private string videosize = "480*360"; // video size

        #region These settings could also be read from a configuration file
        //public static string ffmpegpath = ConfigurationManager.AppSettings["ffmpeg"];
        //public static string imgsize = ConfigurationManager.AppSettings["imgsize"];
        //public static string videosize = ConfigurationManager.AppSettings["videosize"];
        #endregion

        private string destVideo = "";
        /// <summary>
        /// Video path
        /// </summary>
        public string DestVideo
        {
            get { return destVideo; }
            set { destVideo = value; }
        }

        private string destImage = "";
        /// <summary>
        /// Image path
        /// </summary>
        public string DestImage
        {
            get { return destImage; }
            set { destImage = value; }
        }

        /// <summary>
        /// Video duration
        /// </summary>
        public string VideoLength { get; set; }
        // File types (the enum member names were lost in the forum scrape;
        // they are reconstructed here from the descriptions)
        public enum VideoType
        {
            [Description(".avi")]
            Avi,
            [Description(".mov")]
            Mov,
            [Description(".mpg")]
            Mpg,
            [Description(".mp4")]
            Mp4,
            [Description(".flv")]
            Flv
        }
        /// <summary>
        /// Returns the description attached to an enum value
        /// </summary>
        /// <param name="myEnum"></param>
        /// <returns></returns>
        private string GetDiscription(System.Enum myEnum)
        {
            System.Reflection.FieldInfo fieldInfo = myEnum.GetType().GetField(myEnum.ToString());
            object[] attrs = fieldInfo.GetCustomAttributes(typeof(DescriptionAttribute), true);
            if (attrs != null && attrs.Length > 0)
            {
                DescriptionAttribute desc = attrs[0] as DescriptionAttribute;
                if (desc != null)
                    return desc.Description.ToLower();
            }
            return myEnum.ToString();
        }

        // GetDiscription could also be defined as an extension method (requires .NET 3.5):
        //public static string Description(this Enum myEnum)
        //{
        //    return GetDiscription(myEnum);
        //}
        // Constructor: create the destination directories here if necessary
        public FormatConverter()
        {
        }
        #region Format conversion with FFmpeg
        /// <summary>
        /// Runs the format conversion
        /// </summary>
        /// <param name="sourceFile">absolute path of the file to convert</param>
        /// <param name="destPath">relative path where the result is stored</param>
        /// <param name="uniquename">unique file name (without extension)</param>
        /// <param name="videotype">target file type</param>
        /// <param name="createImage">whether to grab snapshots</param>
        /// <param name="getDuration">whether to extract the video duration</param>
        /// <returns>
        /// An empty string on success, otherwise an error message
        /// </returns>
        public string Convert(string sourceFile, string destPath, string uniquename, VideoType videotype, bool createImage, bool getDuration)
        {
            // physical path of ffmpeg.exe
            string ffmpeg = System.Web.HttpContext.Current.Server.MapPath(ffmpegpath);
            if (!File.Exists(ffmpeg))
                return "Cannot find the conversion program!";
            if (!File.Exists(sourceFile))
                return "Cannot find the source file!";

            //string uniquename = FileHelper.GetUniqueFileName();
            string filename = uniquename + GetDiscription(videotype);
            string destFile = HttpContext.Current.Server.MapPath(destPath + filename);

            // (this check was commented out in the original post; without it the
            // copy branch below would be unreachable)
            if (Path.GetExtension(sourceFile).ToLower() != GetDiscription(videotype).ToLower())
            {
                System.Diagnostics.ProcessStartInfo FilestartInfo = new System.Diagnostics.ProcessStartInfo(ffmpeg);
                FilestartInfo.WindowStyle = System.Diagnostics.ProcessWindowStyle.Hidden;
                /* FFmpeg parameters
                 * -i 1.avi          input file
                 * -ab/-ac <bitrate> audio bitrate; with -ac set to stereo, use half the
                 *                   bitrate (e.g. 96 for 192 kbps); the defaults are rather
                 *                   low, use 160 kbps (80) or more for decent quality
                 * -ar <rate>        audio sample rate; the PSP only accepts 24000
                 * -b <bitrate>      video bitrate; ffmpeg seems to do VBR automatically, so
                 *                   this is roughly an average, e.g. 768 or 1500
                 *                   -- conversion misbehaved with it, so it is not used here
                 * -r 29.97          frame rate (non-standard rates cause audio/video desync,
                 *                   so stick to 15 or 29.97)
                 * -s 320x240        output resolution
                 * the last argument is the destination file
                 */
                FilestartInfo.Arguments = " -i " + sourceFile + " -ab 80 -ar 22050 -r 29.97 -s " + videosize + " " + destFile;
                //FilestartInfo.Arguments = "-y -i " + sourceFile + " -s 320x240 -vcodec h264 -qscale 4 -ar 24000 -f psp -muxvb 768 " + destFile;
                try
                {
                    System.Diagnostics.Process.Start(FilestartInfo);
                    destVideo = destPath + filename;
                }
                catch
                {
                    return "Format conversion failed!";
                }
            }
            else
            {
                // same format: just copy the file to the destination directory
                File.Copy(sourceFile, destFile, true);
                destVideo = destPath + filename;
            }

            // extract the video duration
            if (getDuration)
                VideoLength = GetVideoDuration(ffmpeg, sourceFile);

            // grab snapshots
            if (createImage)
            {
                // set up the process
                System.Diagnostics.ProcessStartInfo ImgstartInfo = new System.Diagnostics.ProcessStartInfo(ffmpeg);
                string imgpath = destPath + uniquename + ".jpg"; // FileHelper.GetUniqueFileName(".jpg");
                ConvertImage(sourceFile, imgpath, imgsize, ImgstartInfo);
                imgpath = destPath + uniquename + "_thumb.jpg";
                DestImage = ConvertImage(sourceFile, imgpath, "80*80", ImgstartInfo);
            }
            return "";
        }
        private string ConvertImage(string sourceFile, string imgpath, string imgsize, System.Diagnostics.ProcessStartInfo ImgstartInfo)
        {
            ImgstartInfo.WindowStyle = System.Diagnostics.ProcessWindowStyle.Hidden;
            /* Parameters
             * -y          overwrite the output file (flv_img) without prompting if it exists
             * -i 1.avi    input file
             * -f image2   output format
             * -ss 8       start the task at the given time point, in seconds
             * -vframes 1  number of frames to grab
             * -s          output resolution
             */
            // VideoLength looks like: 00:00:00
            string[] time = VideoLength.Split(':');
            int seconds = int.Parse(time[0]) * 60 * 60 + int.Parse(time[1]) * 60 + int.Parse(time[2]);
            int ss = seconds > 5 ? 5 : seconds - 1;
            ImgstartInfo.Arguments = " -i " + sourceFile + " -y -f image2 -ss " + ss.ToString() + " -vframes 1 -s " + imgsize + " " + HttpContext.Current.Server.MapPath(imgpath);
            System.Diagnostics.Process.Start(ImgstartInfo);
            return imgpath;
        }
        private string GetVideoDuration(string ffmpegfile, string sourceFile)
        {
            using (System.Diagnostics.Process ffmpeg = new System.Diagnostics.Process())
            {
                // will soon hold our video's duration in the form "HH:MM:SS.UU"
                string duration;
                // temp variable holding a string representation of our video's duration
                string result;
                // StreamReader to hold output from ffmpeg
                StreamReader errorreader;
                // we want to execute the process without opening a shell
                ffmpeg.StartInfo.UseShellExecute = false;
                //ffmpeg.StartInfo.ErrorDialog = false;
                ffmpeg.StartInfo.WindowStyle = System.Diagnostics.ProcessWindowStyle.Hidden;
                // redirect StandardError so we can parse it;
                // for some reason the output comes through over StandardError
                ffmpeg.StartInfo.RedirectStandardError = true;
                // set the file name of our process, including the full path
                // (as well as quotes, as if you were calling it from the command-line)
                ffmpeg.StartInfo.FileName = ffmpegfile;
                // set the command-line arguments of our process, including full paths of any files
                // (as well as quotes, as if you were passing these arguments on the command-line)
                ffmpeg.StartInfo.Arguments = "-i " + sourceFile;
                // start the process
                ffmpeg.Start();
                // now that the process is started, we can redirect output to the StreamReader we defined
                errorreader = ffmpeg.StandardError;
                // wait until ffmpeg comes back
                ffmpeg.WaitForExit();
                // read the output from ffmpeg, which for some reason is found in Process.StandardError
                result = errorreader.ReadToEnd();
                // a little convoluted, this string manipulation...
                // working from the inside out, it:
                // takes a substring of result, starting from the end of the "Duration: " label
                // contained within (execute "ffmpeg.exe -i somevideofile" on the command-line
                // to verify for yourself that it is there), going the full length of the timestamp
                duration = result.Substring(result.IndexOf("Duration: ") + ("Duration: ").Length, ("00:00:00").Length);
                return duration;
            }
        }
        #endregion
    }
}
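A short usage sketch (my addition; the web context, paths and file names are assumptions, not from the original post):

C# code
FormatConverter converter = new FormatConverter();
// convert an uploaded AVI to FLV, grab snapshots and read the duration
string error = converter.Convert(@"C:\upload\source.avi", "/Videos/", "clip001",
                                 FormatConverter.VideoType.Flv,
                                 true /*createImage*/, true /*getDuration*/);
if (error == "")
{
    string video = converter.DestVideo;    // e.g. /Videos/clip001.flv
    string thumb = converter.DestImage;    // e.g. /Videos/clip001_thumb.jpg
    string length = converter.VideoLength; // e.g. 00:01:23
}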
1. Common video formats and resolutions
720p: 1280x720 progressive at 60 Hz, line frequency 45 kHz
1080p: 1920x1080 progressive scan, a professional format
2. Notes on some FFmpeg parameters:
* -i filename (input)        source file
* -y                         overwrite existing output files without asking
* -c                         select the codec
* -fs limit_size (output)    limit the output file size, in bytes; no further byte blocks are
                             written once the limit is exceeded, so the output ends up slightly
                             larger than the requested size
* -s                         output resolution as 'wxh' (default: same as the source), e.g.
                             4:3 sizes such as 320x240 / 640x480 / 800x600
* -vframes number (output)   number of video frames to output; alias for -frames:v
* -dframes number (output)   number of data frames to output; alias for -frames:d
* -frames[:stream_specifier] framecount (output, per-stream)
                             stop writing to the stream after framecount frames
* -bsf[:stream_specifier] bitstream_filters (output, per-stream)
                             apply bitstream filters, e.g. to dump the H264 stream of an MP4:
                             ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
* -r 29.97                   frame rate (non-standard rates cause audio/video desync, so stick
                             to 15 or 29.97)
A combined example is shown below.
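For instance (my own illustration, not from the original post), scaling a clip to 640x480 at 29.97 fps and keeping only the first 300 frames combines the flags above as:

    ffmpeg -y -i input.mp4 -s 640x480 -r 29.97 -vframes 300 output.mp4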
3. Example code:
using System;
using System.Diagnostics;
using System.Linq;

public class Demo2
{
    public static string ffmpegtool = @"F:\SolutionSet\ABCSolution\VideoSolution\Demo1\bin\Debug\ffmpeg.exe";
    //public static string ffmpegtool = @"F:\ABCSolution\ffmpeg--ce2217b-win64-static\bin\ffplay.exe";
    public static string playFile = @"F:\SolutionSet\ABCSolution\VideoSolution\VideoSolution\Content\Video\my3.mp4";
    public static string imgFile = @"F:\SolutionSet\ABCSolution\VideoSolution\VideoSolution\Content\Video\my3.gif";
    public static string sourceFile = @"F:\SolutionSet\ABCSolution\VideoSolution\VideoSolution\Content\Video\COOLUI.mp4";
    //public static string sourceFile = @"F:\ABCSolution\VideoSolution\VideoSolution\Content\Video\theme.mp4";

    public void ConvertVideo()
    {
        Process p = new Process();         // external process
        p.StartInfo.FileName = ffmpegtool; // absolute path of the external program
        // arguments (these are the FFMPEG parameters)
        //p.StartInfo.Arguments = @"-i " + sourceFile + " -ab 56 -b a -ar 44100 -b 500 -r 29.97 -s -y " + playFile + "";
        //p.StartInfo.Arguments = "-y -i \"" + sourceFile + "\" -b v -s 800x600 -r 29.97 -b 1500 -acodec aac -ac 2 -ar 24000 -ab 128 -vol 200 -f psp \"" + playFile + "\" ";
        //string strArg = "-i " + sourceFile + " -y -s 640x480 " + playFile + " ";
        // (the size literal of the active line was lost in the scrape;
        //  640x480 matches the commented line above)
        string strArg = "-i " + sourceFile + " -y -s 640x480 " + playFile + " ";
        // grab a snapshot (jpg)
        //string strArg = "-i " + sourceFile + " -y -f image2 -t 1 " + imgFile;
        //string strArg = "-i " + sourceFile + " -y -s -f image2 -t 1 " + imgFile;
        // cut a clip
        //string strArg = " -i " + sourceFile + " -y -frames 100 " + playFile;
        // convert to an animated gif
        //string strArg = "-i " + sourceFile + " -y -s -f gif -vframes 30 " + imgFile;
        //string strArg = " -i " + sourceFile + " -y -f gif -vframes 50 " + imgFile;
        //string strArg = " -i " + sourceFile + " -y -f gif -ss 0:20 -dframes 10 -frames 50 " + imgFile;
        // show basic information
        //string strArg = "-i " + sourceFile + " -n OUTPUT";
        // play the video (with ffplay)
        //string strArg = "-stats -i " + sourceFile + " ";

        p.StartInfo.Arguments = strArg;
        p.StartInfo.UseShellExecute = false;      // do not start via the OS shell (must be false, see MSDN)
        p.StartInfo.RedirectStandardError = true; // route the program's error output into the StandardError
                                                  // stream (important: FFMPEG writes ALL of its messages to
                                                  // the error stream, StandardOutput captures nothing -- it
                                                  // took me over two months to figure this out; mencoder, by
                                                  // contrast, writes to StandardOutput)
        p.StartInfo.CreateNoWindow = true;        // do not create a window for the process
        p.ErrorDataReceived += new DataReceivedEventHandler(Output); // raised when the external program (FFMPEG)
                                                                     // produces output; handling is moved to the
                                                                     // method below, see MSDN
        p.Start();              // start the process
        p.BeginErrorReadLine(); // begin asynchronous reads
        p.WaitForExit();        // block until the process exits
        p.Close();              // close the process
        p.Dispose();            // release resources
    }
    private void Output(object sendProcess, DataReceivedEventArgs output)
    {
        if (!String.IsNullOrEmpty(output.Data))
        {
            // handle the output...
            Console.WriteLine(output.Data);

            TimeSpan span;
            // extract the total duration
            //string partitio1 = @"Duration: \d{2}:\d{2}:\d{2}.\d{2}";
            //if (RegexHelper.IsMatch(partitio1, output.Data))
            {
                string partition = @"(?<=Duration: )\d{2}:\d{2}:\d{2}.\d{2}";
                string timespan = RegexHelper.Matchs(output.Data, partition).FirstOrDefault();
                if (TimeSpan.TryParse(timespan, out span))
                    Console.WriteLine(span.TotalMilliseconds);
            }
            // extract the current time position
            //string partitio2 = @"time=\d{2}:\d{2}:\d{2}.\d{2}";
            //if (RegexHelper.IsMatch(partitio2, output.Data))
            {
                string partition = @"(?<=time=)\d{2}:\d{2}:\d{2}.\d{2}";
                string timespan = RegexHelper.Matchs(output.Data, partition).FirstOrDefault();
                if (TimeSpan.TryParse(timespan, out span))
                    Console.WriteLine(span.TotalMilliseconds);
            }
        }
    }
}
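RegexHelper is the author's own utility and its implementation is not shown; a minimal sketch that satisfies the two call shapes above (names, parameter order and return type are assumptions) could be:

C# code
using System.Collections.Generic;
using System.Linq;
using System.Text.RegularExpressions;

public static class RegexHelper
{
    // used above as IsMatch(pattern, input)
    public static bool IsMatch(string pattern, string input)
    {
        return Regex.IsMatch(input, pattern);
    }

    // used above as Matchs(input, pattern); returns every matched value
    public static IEnumerable<string> Matchs(string input, string pattern)
    {
        return Regex.Matches(input, pattern).Cast<Match>().Select(m => m.Value);
    }
}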
How can an H264 stream be saved into a media file such as MP4?
[Points: 40, accepted answer: chentank]
The recording program needed a new feature: analog TV, where the capture board sends out an RTSP stream (h264 video + alaw (PCMA) audio).
Since I had previously recorded an RTP stream (h264 video + AAC audio) into an MP4 file, the natural decision was to mux this one into MP4 as well.
There are some differences, though:
(1) The RTSP protocol has to be parsed. After studying RFC 2326, it turned out not to be all that complicated.
  RTSP splits into a control stream and a data stream. The control stream carries the commands the client sends to the server -- querying program information, starting and stopping playback, and so on -- and usually runs over TCP. The data stream is the audio/video data the server sends to the requested address and ports; here the audio and video are sent to two separate ports over UDP. RTSP does not mandate TCP or UDP for either stream, so the transport can be chosen to fit the situation.
  The control stream is an HTTP-like text protocol, which is simple and easy to debug; RTSP does not require HTTP either, but that is how it is usually implemented.
  The rough sequence:
  1. The client connects to the RTSP server and sends the OPTIONS method; the server returns the available methods, typically DESCRIBE, SETUP, PLAY, TEARDOWN, etc. Since the RTSP server on the board is also our own code, these methods are guaranteed to be implemented, so the client does not bother checking.
  2. The client sends the DESCRIBE method; the server returns information about the RTSP stream, including the number of video and audio streams and parameters such as bitrate and resolution.
  3. Based on those parameters, the client decides which video and audio streams to play and sends the SETUP method.
   Our RTSP stream is one alaw audio stream plus one h264 video stream; the client must tell the server which ports the audio and video data should be sent to. The request is built by the following code:
int RTSP::Set_Setup()
{
    int nRet = -1;
    int m_nIndex = 0;
    if (m_pBuf != NULL)
    {
        if (m_pContentBase == NULL)
        {
            sprintf(m_pBuf, "SETUP %s/%s %s\r\n", m_strUrl.c_str(), m_pMedia->p_control, RTSP_VERSSION);
        }
        else
        {
            sprintf(m_pBuf, "SETUP %s%s %s\r\n", m_pContentBase, m_pMedia->p_control, RTSP_VERSSION);
            printf("m_pContentBase:%s\n", m_pContentBase);
            printf("m_strUrl:%s\n", m_strUrl.c_str());
            printf("m_pMedia->p_control:%s\n", m_pMedia->p_control);
            printf("m_pBuf:%s\n", m_pBuf);
        }
        //sprintf(m_pBuf, "SETUP %s %s\r\n", m_pMedia->p_control, RTSP_VERSSION);
        m_nIndex = strlen(m_pBuf);
        sprintf(m_pBuf + m_nIndex, "CSeq: %d\r\n", m_nSeqNum);
        m_nIndex = strlen(m_pBuf);
        if (m_pMedia->i_media_type == VIDEO)
        {
            GetVideoPort();
            sprintf(m_pBuf + m_nIndex, "Transport: %s;%s;client_port=%d-%d\r\n", "RTP/AVP", "unicast", m_nVideoPort, m_nVideoPort + 1);
            m_nIndex = strlen(m_pBuf);
        }
        else if (m_pMedia->i_media_type == AUDIO)
        {
            GetAudioPort();
            sprintf(m_pBuf + m_nIndex, "Transport: %s;%s;client_port=%d-%d\r\n", "RTP/AVP", "unicast", m_nAudioPort, m_nAudioPort + 1);
            m_nIndex = strlen(m_pBuf);
        }
        if (m_pSession[0] != 0)
        {
            sprintf(m_pBuf + m_nIndex, "Session: %s\r\n", m_pSession);
            m_nIndex = strlen(m_pBuf);
        }
        sprintf(m_pBuf + m_nIndex, "User-Agent: %s\r\n", USER_AGENT_STR);
        m_nIndex = strlen(m_pBuf);
        sprintf(m_pBuf + m_nIndex, "\r\n");
        m_nIndex = strlen(m_pBuf);
        m_nBufSize = m_nIndex;
        nRet = 0;
    }
    return nRet;
}
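On the wire, the request assembled above comes out roughly like this (URL, port and sequence number are hypothetical):

    SETUP rtsp://192.168.1.100/stream/track1 RTSP/1.0
    CSeq: 2
    Transport: RTP/AVP;unicast;client_port=50000-50001
    User-Agent: MyRecorder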
  4. Once SETUP succeeds, playback is started with the PLAY command:
int RTSP::Set_Play()
{
    int nRet = -1;
    int m_nIndex = 0;
    if (m_pBuf != NULL)
    {
        sprintf(m_pBuf, "PLAY %s %s\r\n", m_strUrl.c_str(), RTSP_VERSSION);
        m_nIndex = strlen(m_pBuf);
        sprintf(m_pBuf + m_nIndex, "CSeq: %d\r\n", m_nSeqNum);
        m_nIndex = strlen(m_pBuf);
        sprintf(m_pBuf + m_nIndex, "Session: %s\r\n", m_pSession);
        m_nIndex = strlen(m_pBuf);
        sprintf(m_pBuf + m_nIndex, "Range: npt=0.000-\r\n");
        m_nIndex = strlen(m_pBuf);
        sprintf(m_pBuf + m_nIndex, "User-Agent: %s\r\n", USER_AGENT_STR);
        m_nIndex = strlen(m_pBuf);
        sprintf(m_pBuf + m_nIndex, "\r\n");
        m_nIndex = strlen(m_pBuf);
        m_nBufSize = m_nIndex;
        nRet = 0;
    }
    return nRet;
}
  After that, the UDP audio and video data can be received on the ports specified above.
For more detail, refer to an implementation of the RTSP protocol.
(2) Muxing the MP4.
We already know the audio and video formats: alaw (PCMA) and h264. Checking the documentation shows that mp4v2 supports exactly these two formats, so the rest is simple:
bool COutputATV::CreateMp4File(string filename)
{
    m_Mp4File = MP4CreateEx(filename.c_str());
    if (m_Mp4File == MP4_INVALID_FILE_HANDLE)
        return false;
    MP4SetTimeScale(m_Mp4File, 90000);
    m_nVideoTrack = MP4AddH264VideoTrack(m_Mp4File,
        90000,     // timescale
        3214,      // sample duration /*(90000 / 25)*/ -- why 3214? read the comments below
        m_nWidth,  // video width/height (the literal values were lost from the original post)
        m_nHeight,
        0x64,      // sps[1] AVCProfileIndication
        0x00,      // sps[2] profile_compat
        0x1f,      // sps[3] AVCLevelIndication
        3);        // 4 bytes length before each NAL unit
    if (m_nVideoTrack == MP4_INVALID_TRACK_ID)
    {
        LOG(LOG_TYPE_ERROR, "CreateMp4File():MP4AddH264VideoTrack() failed.");
        return false;
    }
    MP4SetVideoProfileLevel(m_Mp4File, 0x7F);
    m_nAudioTrack = MP4AddALawAudioTrack(m_Mp4File,
        8000,  // timescale (literal lost from the original post; 8000 Hz is the PCMA rate)
        500);  // sampleDuration (literal lost from the original post; the boards deliver
               // 500 -- see the NOTICE below)
    /* NOTICE:
     * in the standard release of the mp4v2 library (v1.9.1, and trunk-r479), the function
     * MP4AddALawAudioTrack() does not take the 3rd param 'sampleDuration'; it calculates a
     * fixed duration value with the following formula:
     *     uint32_t fixedSampleDuration = (timeScale * 20) / 1000; // 20 mSec/Sample
     * please read the source code of MP4AddALawAudioTrack().
     * they can do it this way because RFC 3551 defines PCMA (a-law) as 20 msec per sample,
     * so the duration is a fixed value; please read RFC 3551: http://www.ietf.org/rfc/rfc3551.txt
     * but the source boards we use do not follow the RFC specification; we found the sample
     * duration value is 500.
     * (why 500? every rtp packet contains a timestamp; the duration is the difference between
     * two samples (not rtp packets), the same as for h264 tracks in rtp). SO:
     * I modified the declaration of MP4AddALawAudioTrack() to add the 3rd param
     * 'sampleDuration' so the actual duration value can be passed in, and I also modified
     * the implementation of MP4AddALawAudioTrack().
     * as a result:
     * ******************************************************************************
     * when distributing the Record software, you MUST use the mp4v2 library shipped
     * with it; please DO NOT use the standard release downloaded from the network!
     * ******************************************************************************
     * we use the default duration value when creating the mp4 file; we fix it up later,
     * when the first two samples are written, with the actual value.
     * Added by: Zhengfeng Rao.
     */
    MP4SetTrackIntegerProperty(m_Mp4File,
        m_nAudioTrack,
        "mdia.minf.stbl.stsd.alaw.channels",
        1); // channel count (literal lost from the original post; PCMA here is mono)
    if (m_nAudioTrack == MP4_INVALID_TRACK_ID)
    {
        LOG(LOG_TYPE_ERROR, "CreateMp4File():MP4AddAudioTrack() failed.");
        return false;
    }
    MP4SetAudioProfileLevel(m_Mp4File, 0x02);
    return true;
}
Writing the audio and video data:
void COutputATV::DecodeRtp(unsigned char *pbuf, int datalength)
{
    if ((pbuf == NULL) || (datalength <= 0))
        return;
    rtp_header_t rtp_header;
    char cType = pbuf[0];
    // the 1st byte indicates whether the node is audio or video; it was added
    // by the input thread, so we need to remove it.
    pbuf += 1;
    datalength -= 1;
    int i_header_size = GetRtpHeader(&rtp_header, pbuf, datalength);
    if (i_header_size <= 0)
    {
        LOG(LOG_TYPE_ERROR, "COutputATV::DecodeRtp() Invalid header size:%d", i_header_size);
        return;
    }
    if (cType == 'A')
    {
        if (rtp_header.i_pt == 0x8) // AUDIO
        {
            int i_size = datalength - i_header_size;
            if (m_nAudioTimeStamp == 0)
                m_nAudioTimeStamp = rtp_header.i_timestamp;
            if (m_nAudioTimeStamp != rtp_header.i_timestamp) // got a frame
            {
                MP4WriteSample(m_Mp4File, m_nAudioTrack, m_pAudioFrame, m_nAudioFrameIndex);
                m_nAudioFrameIndex = 0;
                m_nAudioTimeStamp = rtp_header.i_timestamp;
                memcpy(m_pAudioFrame + m_nAudioFrameIndex, pbuf + i_header_size, i_size);
                m_nAudioFrameIndex += i_size;
            }
            else
            {
                memcpy(m_pAudioFrame + m_nAudioFrameIndex, pbuf + i_header_size, i_size);
                m_nAudioFrameIndex += i_size;
            }
        }
        // else: INVALID packet.
    }
    else if (cType == 'V')
    {
        if (rtp_header.i_pt == 0x60) // VIDEO
        {
            char p_save_buf[4096] = {0};
            int i_size = RtpToH264(pbuf, datalength, p_save_buf, &m_nNaluOkFlag, &m_nLastPktNum);
            if (i_size <= 0)
            {
                DumpFrame(pbuf, datalength);
                LOG_PERIOD(LOG_TYPE_WARN, "RtpToH264() Illegal packet, ignored. datalength = %d, i_size = %d", datalength - 1, i_size);
                return;
            }
            if (m_nVideoTimeStamp == 0)
            {
                m_nVideoTimeStamp = rtp_header.i_timestamp;
                m_nVideoFrameIndex = 0;
                memcpy(m_pVideoFrame + m_nVideoFrameIndex, p_save_buf, i_size);
                m_nVideoFrameIndex += i_size;
            }
            else if (m_nVideoTimeStamp != rtp_header.i_timestamp || p_save_buf[12] == 0x78)
            {
                if (m_nVideoFrameIndex >= 4)
                {
                    // patch the 4-byte length field in front of the NAL unit
                    unsigned int* p = (unsigned int*)(&m_pVideoFrame[0]);
                    *p = htonl(m_nVideoFrameIndex - 4);
                    MP4WriteSample(m_Mp4File, m_nVideoTrack, m_pVideoFrame, m_nVideoFrameIndex, MP4_INVALID_DURATION, 0, 1);
                    //DumpFrame(m_pVideoFrame, m_nVideoFrameIndex);
                }
                m_nVideoFrameIndex = 0;
                m_nVideoTimeStamp = rtp_header.i_timestamp;
                memcpy(m_pVideoFrame + m_nVideoFrameIndex, p_save_buf, i_size);
                m_nVideoFrameIndex += i_size;
            }
            else
            {
                //printf("2.3.3*************i_size:%d, m_nVideoFrameIndex:%d\n", i_size, m_nVideoFrameIndex);
                memcpy(m_pVideoFrame + m_nVideoFrameIndex, p_save_buf, i_size);
                m_nVideoFrameIndex += i_size;
            }
        }
        // else: INVALID packet.
    }
    // else: INVALID packet.
}
One thing needs to be pointed out:
when adding the alaw audio track via MP4AddALawAudioTrack(mp4file, timescale, sampleDuration), the third parameter, sampleDuration, only exists because I patched the libmp4v2 library myself.
The stock libmp4v2 interface is MP4AddALawAudioTrack(mp4file, timescale); sampleDuration is computed internally with the formula:
uint32_t fixedSampleDuration = (timeScale * 20)/1000; // 20 mSec/Sample
The value this computes does not match our actual situation, so I added the third parameter to let the caller specify the sample duration directly.
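As a quick sanity check of that formula: PCMA runs at 8000 Hz per RFC 3551, so the stock library would compute (8000 * 20) / 1000 = 160 ticks per sample (one 20 ms packet), whereas the boards here actually space their samples 500 ticks apart -- hence the need to override it.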