成人国产在线小视频_日韩寡妇人妻调教在线播放_色成人www永久在线观看_2018国产精品久久_亚洲欧美高清在线30p_亚洲少妇综合一区_黄色在线播放国产_亚洲另类技巧小说校园_国产主播xx日韩_a级毛片在线免费

資訊專欄INFORMATION COLUMN

android ijkplayer c層分析-prepare過程與讀取線程(續1-解碼粗略分析)

zhonghanwen / 2729人閱讀

摘要:分別為音頻、視頻和字幕進行相關處理。向下跟蹤兩層,會發現,核心函數是ffplay_video_thread。至此解碼算完了。整個過程真是粗略分析啊,對自己也很抱歉,暫時先這樣吧。

上文中說到在read_thread線程中有個關鍵函數:avformat_open_input(utils.c),應當是讀取視頻文件的,這個函數屬于ffmpeg層。這回進入到其中去看下:

/*
 * Excerpt of FFmpeg's avformat_open_input() (libavformat/utils.c):
 * opens the input via init_input() (which probes the container format),
 * copies the protocol white/black lists from the AVIOContext, and
 * rejects formats not on the user-supplied format whitelist.
 * Returns 0 on success or a negative AVERROR code.
 */
int avformat_open_input(AVFormatContext **ps, const char *filename,
                        AVInputFormat *fmt, AVDictionary **options)
{
    ......
    /* Open the input and probe its format; on success init_input()
     * returns the probe score. */
    if ((ret = init_input(s, filename, &tmp)) < 0)
        goto fail;
    /* Fixed: this line was mis-indented in the excerpt as if it were
     * part of the `if` body; it always executes (the `if` ends in goto). */
    s->probe_score = ret;

    if (!s->protocol_whitelist && s->pb && s->pb->protocol_whitelist) {
        s->protocol_whitelist = av_strdup(s->pb->protocol_whitelist);
        if (!s->protocol_whitelist) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!s->protocol_blacklist && s->pb && s->pb->protocol_blacklist) {
        s->protocol_blacklist = av_strdup(s->pb->protocol_blacklist);
        if (!s->protocol_blacklist) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* Fixed: the format string below lost its escapes in the article dump
     * ("...whitelist "%s" with a raw line break); restored \'%s\'\n. */
    if (s->format_whitelist && av_match_list(s->iformat->name, s->format_whitelist, ",") <= 0) {
        av_log(s, AV_LOG_ERROR, "Format not on whitelist \'%s\'\n", s->format_whitelist);
        ret = AVERROR(EINVAL);
        goto fail;
    }
    ......
}    

init_input這個函數的注釋是:Open input file and probe the format if necessary.打開文件判斷格式。進入到這個函數中觀察,代碼不多,關鍵點av_probe_input_buffer2,ffmpeg里的格式分析函數。里面會調用avio_read,讀取文件。進入到avio_read函數內部,看到主要是循環讀取packet,從AVIOContext的隊列中讀取,通過AVIOContext中的函數指針read_packet來進行。

下面退回到read_thread函數中,看看對h264的視頻流如何處理的。

/*
 * Excerpt of ijkplayer's read thread (ff_ffplay.c): stream selection.
 * Iterates over the streams discovered by avformat_open_input(),
 * matches the caller's wanted stream specifiers into st_index[], and
 * remembers the first H.264 video stream so it can be preferred later.
 */
static int read_thread(void *arg)
{
    ......
    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        enum AVMediaType type = st->codecpar->codec_type;
        /* Discard everything by default; the selected streams are
         * re-enabled later when their components are opened. */
        st->discard = AVDISCARD_ALL;
        if (type >= 0 && ffp->wanted_stream_spec[type] && st_index[type] == -1)
            if (avformat_match_stream_specifier(ic, st, ffp->wanted_stream_spec[type]) > 0)
                st_index[type] = i;

        // choose first h264

        if (type == AVMEDIA_TYPE_VIDEO) {
            enum AVCodecID codec_id = st->codecpar->codec_id;
            video_stream_count++;
            if (codec_id == AV_CODEC_ID_H264) {
                h264_stream_count++;
                if (first_h264_stream < 0)
                    first_h264_stream = i;  /* remember first H.264 stream index */
            }
        }
    }
    ......
}

循環讀取stream,然后判斷是h264后記錄下來。繼續往下看又到了上文提到的stream_component_open函數,這回進入看看:

/*
 * Excerpt: open the decoder for one stream and start its decode thread.
 * The first (elided) half finds/opens the decoder via
 * avcodec_find_decoder(); the switch then launches the per-media-type
 * decode thread through decoder_start().
 * NOTE(review): the "......" sections elide the per-case setup and the
 * break statements — confirm against the full ff_ffplay.c before
 * assuming fallthrough.
 */
static int stream_component_open(FFPlayer *ffp, int stream_index)
{
    ......
    codec = avcodec_find_decoder(avctx->codec_id);
    ......
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
    /* spawn the audio decode thread */
    if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
            goto out;
    ......
    case AVMEDIA_TYPE_VIDEO:
    /* spawn the video decode thread */
    if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
            goto out;
    ......
    case AVMEDIA_TYPE_SUBTITLE:
    ......
}

這是個很長的函數,我看起來是分為2部分,前半部分是尋找解碼器,后半部分的switch case是開始進行解碼。分別為音頻、視頻和字幕進行相關處理。decoder_start是個關鍵函數,開始進行解碼處理。進入這個函數內部看:

/*
 * Start a decoder: mark its packet queue active and spawn the decode
 * thread (video_thread / audio_thread as passed in by
 * stream_component_open()).
 *
 * d    - decoder state; d->queue must already be initialized.
 * fn   - thread entry point, receives `arg`.
 * arg  - opaque argument forwarded to `fn` (an FFPlayer* in practice).
 * name - thread name, for debugging.
 *
 * Returns 0 on success, AVERROR(ENOMEM) if the thread could not be
 * created.  Decoding runs fully asynchronously from this point on.
 */
static int decoder_start(Decoder *d, int (*fn)(void *), void *arg, const char *name)
{
    packet_queue_start(d->queue);
    d->decoder_tid = SDL_CreateThreadEx(&d->_decoder_tid, fn, arg, name);
    if (!d->decoder_tid) {
        /* Fixed: the format string was split by a raw line break in the
         * article dump; restored the "\n" escape. */
        av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    return 0;
}

很短,不是嗎,但是啟動了線程。根據參數傳遞得知,就是上面傳遞進來的video_thread和audio_thread是關鍵線程函數。看到這里可以了解,解碼過程是完全異步的,那么再去看看關鍵的線程函數吧。

static int video_thread(void *arg)
{
    FFPlayer *ffp = (FFPlayer *)arg;
    int       ret = 0;

    if (ffp->node_vdec) {
        ret = ffpipenode_run_sync(ffp->node_vdec);
    }
    return ret;
}

很短,先看參數,從上面的可得知,是FFPlayer類型,從read_thread函數中就傳遞進來的一個結構,可以說是播放器的結構,播放所需要的所有內容這里幾乎都有了。繼續看關鍵函數ffpipenode_run_sync。向下跟蹤兩層,會發現,核心函數是ffplay_video_thread。這個函數內部看起來挺麻煩,進入看看吧:

/*
 * Excerpt of the software video decode loop (ff_ffplay.c).
 * Repeatedly pulls decoded frames via get_video_frame(), pushes each
 * through the avfilter graph (buffersrc -> buffersink), then hands the
 * filtered frames to the display queue with queue_picture().
 * NOTE(review): the preprocessor blocks in this excerpt are unbalanced —
 * the "#endif" below has no matching "#if" in view; the elided "......"
 * sections contain the CONFIG_AVFILTER conditionals.
 */
static int ffplay_video_thread(void *arg)
{
    ......
    for (;;) {
        ret = get_video_frame(ffp, frame);
        ......
        /* Feed the decoded frame into the filter graph source. */
        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;
        
        /* Drain every filtered frame the graph can produce. */
        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                /* EOF from the sink means the decoder fully drained
                 * for the current packet serial. */
                if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;
                ret = 0;
                break;
            }

            /* Measure filter latency; discard implausible values. */
            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;
#endif
            /* Frame duration from the stream frame rate; pts in seconds. */
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            /* Queue the filtered frame for display. */
            ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
#endif
    }
}

關鍵點首先是get_video_frame,然后是av_buffersrc_add_frame和后面的while循環部分里的queue_picture。那么不得不進入到get_video_frame中看下:

/*
 * Fetch one decoded video frame into `frame`.
 *
 * Returns -1 on abort, 0 when no frame was produced (including when a
 * late frame was dropped for A/V sync), and >0 when `frame` holds a
 * picture.  When frame dropping is enabled (ffp->framedrop) and video
 * is not the master clock, frames that are already behind the master
 * clock are discarded early, before reaching the display queue.
 */
static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
{
    VideoState *is = ffp->is;

    ffp_video_statistic_l(ffp);

    int got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL);
    if (got_picture < 0)
        return -1;
    if (!got_picture)
        return got_picture;

    /* Timestamp of this frame in seconds (NAN when unknown). */
    double dpts = NAN;
    if (frame->pts != AV_NOPTS_VALUE)
        dpts = av_q2d(is->video_st->time_base) * frame->pts;

    frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);

    /* Early frame-drop: only when enabled and video is not master. */
    int drop_enabled = ffp->framedrop > 0 ||
                       (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER);
    if (drop_enabled && frame->pts != AV_NOPTS_VALUE) {
        double diff = dpts - get_master_clock(is);
        if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
            diff - is->frame_last_filter_delay < 0 &&
            is->viddec.pkt_serial == is->vidclk.serial &&
            is->videoq.nb_packets) {
            is->frame_drops_early++;
            is->continuous_frame_drops_early++;
            if (is->continuous_frame_drops_early > ffp->framedrop) {
                /* Dropped too many in a row: keep this one, reset streak. */
                is->continuous_frame_drops_early = 0;
            } else {
                av_frame_unref(frame);
                got_picture = 0;
            }
        }
    }

    return got_picture;
}

關鍵點只有一個就是decoder_decode_frame,好吧,繼續往下看,層級有點多了哈:

/*
 * Decode one frame (video/audio) or one subtitle from the decoder's
 * packet queue.  Returns >0 when a frame was produced, 0 when the
 * stream finished without producing a frame, -1 on abort.
 *
 * The decoder keeps the packet currently being consumed in d->pkt
 * (the owning copy) and a shrinking working copy in d->pkt_temp, so a
 * large audio packet can be decoded across several calls.
 */
static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int got_frame = 0;

    do {
        int ret = -1;

        if (d->queue->abort_request)
            return -1;

        /* Need a new packet when none is pending or the queue serial
         * was bumped by a seek/flush. */
        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                /* Queue ran dry: wake the read thread so it refills. */
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
                    return -1;
                /* A flush packet signals a seek: reset the codec and
                 * the predicted-pts state. */
                if (pkt.data == flush_pkt.data) {
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }

        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: {
                /* NOTE: avcodec_decode_video2() is the pre-send/receive
                 * FFmpeg decode API used by this ijkplayer version. */
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
                    /* Pick the pts source per decoder_reorder_pts:
                     * -1 = best-effort guess, 0 = use packet dts. */
                    if (ffp->decoder_reorder_pts == -1) {
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                    } else if (!ffp->decoder_reorder_pts) {
                        frame->pts = frame->pkt_dts;
                    }
                }
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    /* Rescale pts into a 1/sample_rate time base, or
                     * synthesize it from the predicted next_pts. */
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                    if (frame->pts != AV_NOPTS_VALUE) {
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                break;
            default:
                break;
        }

        if (ret < 0) {
            /* Decode error: drop the remainder of this packet. */
            d->packet_pending = 0;
        } else {
            /* Only the first decode call for a packet carries timestamps. */
            d->pkt_temp.dts =
            d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                /* Audio may consume a packet over several calls; other
                 * media types always consume it whole. */
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                /* NULL-data (drain) packet produced nothing: the
                 * decoder is fully flushed for this serial. */
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}

packet_queue_get_or_buffering從解碼前的隊列中讀取一幀的數據,然后調用avcodec_decode_video2。不能進去看了,沒完了,總之是讀取一幀。往回倒,回到ffplay_video_thread里,下面就是queue_picture,將一幀解碼后的圖像放入解碼后隊列。
至此解碼算完了。整個過程真是粗略分析啊,對自己也很抱歉,暫時先這樣吧。后面有空繼續就某個點深入進行。

文章版權歸作者所有,未經允許請勿轉載,若此文章存在違規行為,您可以聯系管理員刪除。

轉載請注明本文地址:http://systransis.cn/yun/66711.html

相關(guān)文章

  • android ijkplayer c分析-prepare過程讀取線程(續3-解碼核心video

    摘要:基本上就是對一個數據幀的描述。我理解的是一個未解碼的壓縮數據幀。 read_thread這個最關鍵的讀取線程中,逐步跟蹤,可以明確stream_component_open---> decoder_start---> video_thread--->ffplay_video_thread。這個調用過程,在解碼開始后的異步解碼線程中,調用的是ffplay_video_thread。具體可...

    _Suqin 評論0 收藏0
  • android ijkplayer c分析-prepare過程讀取線程

    摘要:我們下面先從讀取線程入手。無論這個循環前后干了什么,都是要走這一步,讀取數據幀。從開始,我理解的是計算出當前數據幀的時間戳后再計算出播放的起始時間到當前時間,然后看這個時間戳是否在此范圍內。 ijkplayer現在比較流行,因為工作關系,接觸了他,現在做個簡單的分析記錄吧。我這里直接跳過java層代碼,進入c層,因為大多數的工作都是通過jni調用到c層來完成的,java層的內容并不是主...

    MobService 評論0 收藏0
  • android ijkplayer c分析-prepare過程讀取線程(續2-讀取輸入源)

    摘要:下面是,讀取頭信息頭信息。猜測網絡部分至少在一開始就應當初始化好的,因此在的過程里面找,在中找到了。就先暫時分析到此吧。 這章要簡單分析下ijkplayer是如何從文件或網絡讀取數據源的。還是read_thread函數中的關鍵點avformat_open_input函數: int avformat_open_input(AVFormatContext **ps, const char ...

    kevin 評論0 收藏0
  • android ijkplayer c分析-初始化(續1 javac銜接)

    摘要:初始化的過程上一篇其實并未完全分析完,這回接著來。層的函數中,最后還有的調用,走的是層的。結構體如下的和,以及,其余是狀態及的內容。整個過程是個異步的過程,并不阻塞。至于的東西,都是在層創建并填充的。 初始化的過程上一篇其實并未完全分析完,這回接著來。java層的initPlayer函數中,最后還有native_setup的調用,走的是c層的IjkMediaPlayer_native_...

    Olivia 評論0 收藏0

發(fā)表評論

0條評論

最新活動
閱讀需要支付1元查看
<