当前位置: 首页 > 知识库问答 >
问题:

用FFmpeg对音视频文件进行混流(muxing)

楚丰羽
2023-03-14
  /*
   * Opens the external audio input file (path taken from _audioInName),
   * locates its first audio stream and opens the requested codec on it.
   *
   * @param oc       passed in as the output context, but immediately
   *                 reassigned (locally) by avformat_open_input to a new
   *                 *input* context — the caller's pointer is unaffected
   *                 since it is passed by value.
   * @param codec    out: the encoder found for codec_id.
   * @param st       out: the audio stream found in the input file.
   * @param codec_id id of the codec to look up.
   *
   * On any failure an error is reported and the function returns early,
   * leaving *codec / *st possibly NULL — callers must check.
   */
  void open_audio(AVFormatContext *oc, AVCodec **codec, AVStream **st ,enum AVCodecID codec_id){

    int ret;

    /* Look up the requested encoder first so we can fail early. */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",avcodec_get_name(codec_id));
        return; /* BUG FIX: previously fell through with *codec == NULL */
    }

    /* Open the external audio source.  From here on 'oc' refers to the
     * newly-opened INPUT context, not the caller's output context.
     * NOTE(review): avformat_find_stream_info() is never called, so codec
     * parameters on the streams may be incomplete — confirm upstream. */
    if(avformat_open_input(&oc,_audioInName.c_str(),NULL,NULL) !=0){
        Msg::PrintErrorMsg("Error opening audio file");
        return; /* BUG FIX: 'oc' is invalid here; do not dereference it */
    }

    AVStream* audioStream = NULL;

    /* Find the audio stream (some container files can have multiple streams in them). */
    for (uint32_t i = 0; i < oc->nb_streams; ++i)
    {
        if (oc->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audioStream = oc->streams[i];
            break;
        }
    }

    if (audioStream == NULL)
    {
        Msg::PrintErrorMsg("Could not find any audio stream in the file");
        return; /* BUG FIX: previously continued and dereferenced NULL below */
    }
    *st =audioStream;

    /* Configure the stream's codec context for mono 16-bit 44.1 kHz audio. */
    AVCodecContext *c  = audioStream->codec;
    c->codec = *codec;
    audioStream->id = 1;
    c->sample_fmt  = AV_SAMPLE_FMT_S16;
    c->bit_rate    = 64000;
    c->sample_rate = 44100;
    c->channels    = 1;

    /* BUG FIX: 'oc' is an *input* context here, so oc->oformat is NULL for
     * demuxed files; guard before dereferencing to avoid a crash. */
    if (oc->oformat && (oc->oformat->flags & AVFMT_GLOBALHEADER)){
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    /* Open the codec on the configured context. */
    ret = avcodec_open2(c, *codec, NULL);
    if (ret < 0) {
        Msg::PrintErrorMsg("Could not open audio codec\n");
    }
}

然后我尝试这样写音频帧:

  /*
   * Reads packets from 'oc', decodes those belonging to stream 'st' into a
   * frame, re-encodes the frame and writes the result back to 'oc'.
   *
   * NOTE(review): 'c' (st->codec) is used both to decode and to encode;
   * decoding and encoding normally require two separate codec contexts —
   * this is almost certainly the root cause of the reported problems.
   * Also note 'oc' is used both as demux source and mux target; verify
   * against the caller which context is actually intended.
   */
  void write_audio_frame(AVFormatContext *oc, AVStream *st){
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet, ret;
    av_init_packet(&pkt);
    c = st->codec;

    // Read packets until EOF / error.
    while(av_read_frame(oc,&pkt) == 0 ){

        if(pkt.stream_index ==st->index){

            // Try to decode the packet into a frame
            int frameFinished = 0;
            avcodec_decode_audio4(c, frame, &frameFinished, &pkt);

            // Some frames rely on multiple packets, so we have to make sure
            // the frame is finished before we can use it.
            if (frameFinished){
                // Re-encode the decoded frame.  The encoder overwrites 'pkt'
                // with the new compressed data.
                ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
                if (ret < 0) {
                    Msg::PrintErrorMsg("Error encoding audio frame\n");
                }
                if (!got_packet){
                    printf("failed to aquire packet");
                }
                pkt.stream_index = st->index;
                /* Write the compressed frame to the media file. */
                ret = av_interleaved_write_frame(oc, &pkt);
                if (ret != 0) {

                    Msg::PrintErrorMsg("Error while writing audio frame.");
                }
            }
        }
        // BUG FIX: release each packet before reading the next one —
        // previously packets leaked once per iteration, and a stray extra
        // closing brace left this cleanup outside the function body.
        av_free_packet(&pkt);
    }
    av_free_packet(&pkt);
    avcodec_free_frame(&frame);
}

问题是我从来不传递这个语句:“if(pkt.stream_index==st->index)”。数据包流索引从来不等于音频流索引。谁能指出我错在哪里?

更新:

   // Main mux loop: interleave audio packets (copied from an external input
   // file) with locally-encoded video frames until the requested stream
   // duration is reached.  Fragment quoted from a larger function; relies on
   // enclosing state (frame_count, _frames, informat, oc, pkt, outpkt, ...).
   while(frame_count < _streamDurationNBFrames-1){

        uint8_t *frameToWrite =_frames.front();


        // Compute current audio and video time. ///
        // NOTE(review): audio_pts is derived from audioIn_st->pts.val, which
        // is a muxer-side fraction counter — it stays 0 for a pure *input*
        // stream, which matches the reported "always zero" symptom; the
        // per-packet pkt.pts is likely what should be used here. TODO confirm.

        if (audio_st){
            audio_pts = (double)audioIn_st->pts.val * audioIn_st->time_base.num / audioIn_st->time_base.den;
        }
        else{

            audio_pts = 0.0;
        }
        if (video_st){

            video_pts = (double)video_st->pts.val * video_st->time_base.num /   video_st->time_base.den;

        }else{
            video_pts = 0.0;
        }

        // Stop once both streams (where present) have reached the target duration.
        if ((!audio_st || audio_pts >= _streamDuration) && (!video_st || video_pts >= _streamDuration)){

            break;

        }

        // Whichever stream is "behind" gets serviced next: audio packets are
        // stream-copied; otherwise a video frame is encoded.
        if (audio_st && audio_pts < video_pts) {
            av_read_frame(informat, &pkt);//read audio from input stream
             Msg::PrintMsg("Encode audio here...");

          //==================   AUDIO ENCODE HERE   

           // Stream-copy the input packet.  NOTE(review): outpkt.stream_index
           // is copied from the *input* file's index — it must be remapped to
           // the output audio stream's index, and pts/dts rescaled to the
           // output time base, before av_interleaved_write_frame.
           outpkt.data = pkt.data;
           outpkt.size = pkt.size;
           outpkt.stream_index = pkt.stream_index;
           outpkt.flags |= AV_PKT_FLAG_KEY;
           outpkt.pts = pkt.pts;
           outpkt.dts =pkt.dts;
           if(av_interleaved_write_frame(oc, &outpkt) < 0)
           {
            Msg::PrintErrorMsg("Fail Audio Write ");
           }
           else
           {
               audio_st->codec->frame_number++;
           }
           av_free_packet(&outpkt);
           av_free_packet(&pkt);



         }else{
          //==================   VIDEO  ENCODE HERE   

            write_video_frame(oc, video_st,frameToWrite);

            // Advance the video frame's pts by one frame duration in the
            // stream time base.
            frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
         }

        ///at last delete this frame:
        // NOTE(review): frameToWrite appears to be an array (uint8_t*); if it
        // was allocated with new[], this should be delete[] — confirm at the
        // allocation site.
        _frames.pop();
        delete frameToWrite; ///deallocate the written frame!
    }
`audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;` 的结果始终为零,因为 `(double)audio_st->pts.val` 返回的值是零。

顺便说一句,下面的答案没有帮助,因为它假设音频和视频流都来自同一个文件,而在我的情况下,只有音频来自外部源。

共有1个答案

聂迪
2023-03-14

你可以使用同一个上下文,不需要两个独立的上下文。如果要同时对视频和音频进行编码,那么需要先创建视频流,再创建音频流;如果只想对音频进行编码,则只需要创建音频流。`if (pkt.stream_index == st->index)` 这类判断通常只在转码(即更改容器格式)时才是必需的:从一个视频文件中读取帧并写入另一个文件时,你需要知道该帧来自音频流还是视频流。而如果你拿到的是解码后的音频包,那么在调用 av_interleaved_write_frame 之前,需要在音频包中设置正确的流索引。

在您的代码中,您没有设置正确编码所需的音频数据包的pts和dts。

前段时间我写了一个类似的程序,你可以看看它作为你的参考。

/**
 * Opens the input media file, sets up a matching output file
 * ("<input>clip_out.avi") with stream-copied video/audio parameters, and
 * writes the output header.
 *
 * @param filename path of the input media file.
 * @return 0 on success, -1 on any failure (file open, stream probe, codec
 *         lookup, stream allocation, context copy, output open, or header
 *         write).  Sets m_init_done = true only on full success.
 *
 * NOTE(review): early-return error paths do not release the contexts
 * allocated so far — acceptable if the owning class cleans up in its
 * destructor; confirm.
 */
int VideoClipper::Init(const wxString& filename)
{
    int ret = 0;
    char errbuf[64];

    // Register all muxers/demuxers/codecs (required once with old FFmpeg).
    av_register_all();
    if ((ret = avformat_open_input( &m_informat, filename.mb_str(), 0, 0)) != 0 )
    {
        av_strerror(ret,errbuf,sizeof(errbuf));
        PRINT_VAL("Not able to Open file;; ", errbuf)
        ret = -1;
        return ret;
    }
    else
    {
        PRINT_MSG("Opened File ")
    }

    // Probe the container so stream/codec parameters are populated.
    if ((ret = avformat_find_stream_info(m_informat, 0))< 0 )
    {

        av_strerror(ret,errbuf,sizeof(errbuf));
        PRINT_VAL("Not Able to find stream info:: ", errbuf)
        ret = -1;
        return ret;
    }
    else
    {
        PRINT_MSG("Got stream Info ")
    }

    // Locate the first video and first audio stream (if any).
    for(unsigned int i = 0; i<m_informat->nb_streams; i++)
    {
        if(m_informat->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {

            PRINT_MSG("Found Video Stream ")
            m_in_vid_strm_idx = i;
            m_in_vid_strm = m_informat->streams[i];
        }

        if(m_informat->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            PRINT_MSG("Found Audio Stream ")
            m_in_aud_strm_idx = i;
            m_in_aud_strm = m_informat->streams[i];
        }
    }

    // Neither stream found: nothing to clip.
    // NOTE(review): ret is set but the function does not return here, so
    // initialization continues — confirm whether this fall-through is
    // intentional.
    if(m_in_aud_strm_idx == -1 && m_in_vid_strm_idx == -1)
    {
       ret = -1;    
    }

    // Derive the clip end time: prefer the container duration; fall back to
    // the per-stream duration converted to seconds.
    if(m_informat->duration == AV_NOPTS_VALUE)
    {
        if(m_in_vid_strm_idx != -1 && m_informat->streams[m_in_vid_strm_idx])
        {
            if(m_informat->streams[m_in_vid_strm_idx]->duration != AV_NOPTS_VALUE)
            {
                //m_in_end_time = (m_informat->streams[m_in_vid_strm_idx]->duration)/(AV_TIME_BASE);
                // Video stream duration is in stream time-base units, so
                // divide by (den/num) to get seconds.
                m_in_end_time = (m_informat->streams[m_in_vid_strm_idx]->duration)/(m_informat->streams[m_in_vid_strm_idx]->time_base.den/m_informat->streams[m_in_vid_strm_idx]->time_base.num);

            }

        }
        else if(m_in_aud_strm_idx != -1 && m_informat->streams[m_in_aud_strm_idx])
        {
            if(m_informat->streams[m_in_aud_strm_idx]->duration != AV_NOPTS_VALUE)
            {
                // NOTE(review): audio stream duration is also in stream
                // time-base units, not AV_TIME_BASE units — this branch looks
                // inconsistent with the video branch above; confirm.
                m_in_end_time = (m_informat->streams[m_in_aud_strm_idx]->duration)/(AV_TIME_BASE);
            }
        }
    }
    else
    {
        // Container duration is expressed in AV_TIME_BASE units.
        m_in_end_time = (m_informat->duration)/(AV_TIME_BASE);
    }

    // Determine the input frame rate; default to 25 fps when unavailable.
    if(m_in_vid_strm_idx != -1 && m_informat->streams[m_in_vid_strm_idx])
    {
        if(m_informat->streams[m_in_vid_strm_idx]->r_frame_rate.num != AV_NOPTS_VALUE && m_informat->streams[m_in_vid_strm_idx]->r_frame_rate.den != 0)
        {
            m_fps =  (m_informat->streams[m_in_vid_strm_idx]->r_frame_rate.num)/ (m_informat->streams[m_in_vid_strm_idx]->r_frame_rate.den);
        }
    }
    else
    {
        m_fps = 25;    
    }

    // Guess the output container format from the output file name.
    AVOutputFormat *outfmt = NULL;
    std::string outfile = std::string(filename) + "clip_out.avi";
    outfmt = av_guess_format(NULL,outfile.c_str(),NULL);

    if(outfmt == NULL)
    {
        ret = -1;
        return ret;
    }
    else
    {
        m_outformat = avformat_alloc_context();
        if(m_outformat)
        {
            m_outformat->oformat = outfmt;
            // NOTE(review): _snprintf is MSVC-specific; portable code would
            // use snprintf.
            _snprintf(m_outformat->filename, sizeof(m_outformat->filename), "%s", outfile.c_str());    
        }
        else
        {
            ret = -1;
            return ret;
        }
    }

    AVCodec *out_vid_codec,*out_aud_codec;
    out_vid_codec = out_aud_codec = NULL;

    // Create the output VIDEO stream first (stream order matters for some
    // players), copying codec parameters from the input stream.
    if(outfmt->video_codec != AV_CODEC_ID_NONE && m_in_vid_strm != NULL)
    {
        out_vid_codec = avcodec_find_encoder(outfmt->video_codec);
        if(NULL == out_vid_codec)
        {
            PRINT_MSG("Could Not Find Vid Encoder")
            ret = -1;
            return ret;
        }
        else
        {
            PRINT_MSG("Found Out Vid Encoder ")
            m_out_vid_strm = avformat_new_stream(m_outformat, out_vid_codec);
            if(NULL == m_out_vid_strm)
            {
                 PRINT_MSG("Failed to Allocate Output Vid Strm ")
                 ret = -1;
                 return ret;
            }
            else
            {
                 PRINT_MSG("Allocated Video Stream ")
                 if(avcodec_copy_context(m_out_vid_strm->codec, m_informat->streams[m_in_vid_strm_idx]->codec) != 0)
                 {
                    PRINT_MSG("Failed to Copy Context ")
                    ret = -1;
                    return ret;
                 }
                 else
                 {
                    // Fix up timing-related fields that avcodec_copy_context
                    // does not carry over correctly for the muxer.
                    m_out_vid_strm->sample_aspect_ratio.den = m_out_vid_strm->codec->sample_aspect_ratio.den;
                    m_out_vid_strm->sample_aspect_ratio.num = m_in_vid_strm->codec->sample_aspect_ratio.num;
                    PRINT_MSG("Copied Context ")
                    m_out_vid_strm->codec->codec_id = m_in_vid_strm->codec->codec_id;
                    m_out_vid_strm->codec->time_base.num = 1;
                    m_out_vid_strm->codec->time_base.den = m_fps*(m_in_vid_strm->codec->ticks_per_frame);         
                    m_out_vid_strm->time_base.num = 1;
                    m_out_vid_strm->time_base.den = 1000;
                    m_out_vid_strm->r_frame_rate.num = m_fps;
                    m_out_vid_strm->r_frame_rate.den = 1;
                    m_out_vid_strm->avg_frame_rate.den = 1;
                    m_out_vid_strm->avg_frame_rate.num = m_fps;
                    // Duration in the 1/1000 output time base set above.
                    m_out_vid_strm->duration = (m_out_end_time - m_out_start_time)*1000;
                 }
               }
            }
      }

    // Then create the output AUDIO stream, again stream-copying parameters.
    if(outfmt->audio_codec != AV_CODEC_ID_NONE && m_in_aud_strm != NULL)
    {
        out_aud_codec = avcodec_find_encoder(outfmt->audio_codec);
        if(NULL == out_aud_codec)
        {
            PRINT_MSG("Could Not Find Out Aud Encoder ")
            ret = -1;
            return ret;
        }
        else
        {
            PRINT_MSG("Found Out Aud Encoder ")
            m_out_aud_strm = avformat_new_stream(m_outformat, out_aud_codec);
            if(NULL == m_out_aud_strm)
            {
                PRINT_MSG("Failed to Allocate Out Vid Strm ")
                ret = -1;
                return ret;
            }
            else
            {
                if(avcodec_copy_context(m_out_aud_strm->codec, m_informat->streams[m_in_aud_strm_idx]->codec) != 0)
                {
                    PRINT_MSG("Failed to Copy Context ")
                    ret = -1;
                    return ret;
                }
                else
                 {
                    PRINT_MSG("Copied Context ")
                    m_out_aud_strm->codec->codec_id = m_in_aud_strm->codec->codec_id;
                    // codec_tag = 0 lets the muxer pick the right tag for the
                    // output container.
                    m_out_aud_strm->codec->codec_tag = 0;
                    m_out_aud_strm->pts = m_in_aud_strm->pts;
                    m_out_aud_strm->duration = m_in_aud_strm->duration;
                    m_out_aud_strm->time_base.num = m_in_aud_strm->time_base.num;
                    m_out_aud_strm->time_base.den = m_in_aud_strm->time_base.den;

                }
            }
         }
      }

      // Open the output file unless the format writes nowhere (AVFMT_NOFILE).
      if (!(outfmt->flags & AVFMT_NOFILE)) 
      {
        if (avio_open2(&m_outformat->pb, outfile.c_str(), AVIO_FLAG_WRITE,NULL, NULL) < 0) 
        {
                PRINT_VAL("Could Not Open File ", outfile)
                ret = -1;
                return ret;
        }
      }
        /* Write the stream header, if any. */
      if (avformat_write_header(m_outformat, NULL) < 0) 
      {
            PRINT_VAL("Error Occurred While Writing Header ", outfile)
            ret = -1;
            return ret;
      }
      else
      {
            PRINT_MSG("Written Output header ")
            m_init_done = true;
      }

    return ret;
}

/**
 * Reads packets from the input file and stream-copies them into the output
 * file opened by Init(), rewriting pts/dts with simple monotonically
 * increasing counters so audio and video stay interleaved.
 *
 * @return 0 on success, -1 if the clip was not marked good (m_good_clip).
 *
 * NOTE(review): pts/dts are replaced by hand-maintained integer counters
 * (vid_pts/aud_pts) rather than av_rescale_q into the output time base —
 * this works for the author's containers but is not a general solution.
 */
int VideoClipper::GenerateClip(void)
{
    AVPacket pkt, outpkt;
    // Hand-maintained timestamp counters for the output streams.
    int aud_pts = 0, vid_pts = 0, aud_dts = 0, vid_dts = 0;
    int last_vid_pts = 0;
    if(m_good_clip)
    {
        // Position the demuxer at the clip start.
        SeekFrame();
        while(av_read_frame(m_informat, &pkt) >= 0 && (m_num_frames-- > 0))
        {
            if(pkt.stream_index == m_in_vid_strm_idx)
            {
                PRINT_VAL("ACTUAL VID Pkt PTS ",av_rescale_q(pkt.pts,m_in_vid_strm->time_base, m_in_vid_strm->codec->time_base))
                PRINT_VAL("ACTUAL VID Pkt DTS ", av_rescale_q(pkt.dts, m_in_vid_strm->time_base, m_in_vid_strm->codec->time_base ))
                av_init_packet(&outpkt);
                if(pkt.pts != AV_NOPTS_VALUE)
                {
                    // Ensure the video pts strictly increases between packets.
                    if(last_vid_pts == vid_pts)
                    {
                        vid_pts++;
                        last_vid_pts = vid_pts;
                    }
                    outpkt.pts = vid_pts;   
                    PRINT_VAL("ReScaled VID Pts ", outpkt.pts)
                }
                else
                {
                    outpkt.pts = AV_NOPTS_VALUE;
                }

                if(pkt.dts == AV_NOPTS_VALUE)
                {
                    outpkt.dts = AV_NOPTS_VALUE;
                }
                else
                {
                    // dts mirrors pts (no B-frame reordering handled here).
                    outpkt.dts = vid_pts;
                    PRINT_VAL("ReScaled VID Dts ", outpkt.dts)
                    PRINT_MSG("=======================================")
                }

                // Stream-copy the payload; data/size reference pkt's buffer,
                // which stays valid until av_free_packet(&pkt) below.
                outpkt.data = pkt.data;
                outpkt.size = pkt.size;
                outpkt.stream_index = pkt.stream_index;
                outpkt.flags |= AV_PKT_FLAG_KEY;
                last_vid_pts = vid_pts;
                if(av_interleaved_write_frame(m_outformat, &outpkt) < 0)
                {
                    PRINT_MSG("Failed Video Write ")
                }
                else
                {
                    m_out_vid_strm->codec->frame_number++;
                }
                av_free_packet(&outpkt);
                av_free_packet(&pkt);
            }
            else if(pkt.stream_index == m_in_aud_strm_idx)
            {
                PRINT_VAL("ACTUAL AUD Pkt PTS ", av_rescale_q(pkt.pts, m_in_aud_strm->time_base, m_in_aud_strm->codec->time_base))
                PRINT_VAL("ACTUAL AUD Pkt DTS ", av_rescale_q(pkt.dts, m_in_aud_strm->time_base, m_in_aud_strm->codec->time_base))
                //num_aud_pkt++;
                av_init_packet(&outpkt);
                if(pkt.pts != AV_NOPTS_VALUE)
                {
                    outpkt.pts = aud_pts;
                    PRINT_VAL("ReScaled AUD PTS ", outpkt.pts)
                }
                else
                {
                    outpkt.pts = AV_NOPTS_VALUE;
                }

                if(pkt.dts == AV_NOPTS_VALUE)
                {
                    outpkt.dts = AV_NOPTS_VALUE;
                }
                else
                {
                    // Keep dts <= pts and strictly increasing so the muxer
                    // does not reject the packet.
                    outpkt.dts = aud_pts;
                    PRINT_VAL("ReScaled AUD DTS ", outpkt.dts)
                    PRINT_MSG("====================================")
                    if( outpkt.pts >= outpkt.dts)
                    {
                        outpkt.dts = outpkt.pts;
                    }
                    if(outpkt.dts == aud_dts)
                    {
                        outpkt.dts++;
                    }
                    if(outpkt.pts < outpkt.dts)
                    {
                        outpkt.pts = outpkt.dts;
                        aud_pts = outpkt.pts;
                    }
                }

                outpkt.data = pkt.data;
                outpkt.size = pkt.size;
                outpkt.stream_index = pkt.stream_index;
                outpkt.flags |= AV_PKT_FLAG_KEY;
                // Keep the video counter in lock-step with audio so the two
                // streams stay interleaved.
                vid_pts = aud_pts;
                aud_pts++;
                if(av_interleaved_write_frame(m_outformat, &outpkt) < 0)
                {
                    PRINT_MSG("Faile Audio Write ")
                }
                else
                {
                    m_out_aud_strm->codec->frame_number++;
                }
                av_free_packet(&outpkt);
                av_free_packet(&pkt);
        }
        else
        {
            // Packet belongs to a stream we are not copying (e.g. subtitles).
            PRINT_MSG("Got Unknown Pkt ")
            //num_unkwn_pkt++;
        }
        //num_total_pkt++;
    }

    // Finalize the output container.
    av_write_trailer(m_outformat);
    av_free_packet(&outpkt);
    av_free_packet(&pkt);
    return 0;    
 }
    return -1;
}
 类似资料:
  • 我想从视频中提取对齐的音频流。目标是获得与视频精确对齐的音频序列。 问题:视频和音频序列不对齐。输出音频持续时间比视频输入短。 要复制的脚本: 我的尝试(没有成功): 按照此答案中的建议添加 添加,同时导出视频(链接) 在Audacity中打开。那里的持续时间是 在VLC中打开。持续时间: 显式设置帧率 其他视频文件 如果能给我一些建议,我将不胜感激。非常感谢。

  • 我在尝试连接FFMPEG中的多个文件时遇到了一个问题;我的目标是通过串联不同类型的幻灯片来创建视频演示文稿: (a)图像幻灯片,通过循环帧片刻转换成视频。这些类型的幻灯片没有音频,所以我为它们添加了静音音轨: (b)视频幻灯片,它有一个覆盖的水印,并持续到视频结束。如果文件不包含音频,则添加的方式与前面的情况相同: 因此,一旦我有了所有生成的文件和一个包含所有文件名的。txt文件,我想使用简单的命

  • 我有一个长音频部分和一个短视频部分,我想在一起mux。 我正在尝试使用以下命令进行MUX: video_0-0002.h264-整个文件(2秒长) Audio.wav-从4秒到6秒 但音频被搞砸了...我怎样才能正确地做呢? 也试过了,听起来好像最后还是有寂静。

  • 我无法使用以下python代码从mp4文件创建带有ffmpeg的单声道音频文件 这是我的mp4文件的媒体信息 输出 {'index':'0','codec\u name':'h264','codec\u long\u name':'H.264/AVC/MPEG-4 AVC/MPEG-4第10部分','profile':'High','codec\u type':'video','codec\u t

  • 正在尝试从图像(1080p.png)音乐(320kbmp3)为youtube制作视频。 但转化是缓慢的。有什么想法吗,它是如何使优化的<代码>E:\U测试

  • 问题内容: 我现在可以编译ffmpeg并将其添加到项目创建的Android.mk文件的jni文件夹中,我想使用ffmpeg从存储在静态arraylist中的图像创建视频文件 我已经搜索了很多,但找不到任何教程,对此表示感谢。 问题答案: 我也有类似的需求,并且达到了相同的目的。您可以通过两种方式执行此操作。我想先分享一个简单的例子。 在Android内部创建一个临时文件夹。 将图像复制到新文件夹中