author     psi29a <psi29a@gmail.com>  2022-01-06 13:08:45 +0000
committer  psi29a <psi29a@gmail.com>  2022-01-06 13:08:45 +0000
commit     6a2cf2e358ef6610d68735d5652816658d858f72 (patch)
tree       1451989de1da34cf588eebaadfdc68976a2732ce
parent     1d4de71d35780db3e9a1de81c4dbc14da0f0f2c2 (diff)
parent     f0db57661190b8bc94ad9fc337f5d4e25869c340 (diff)
Merge branch 'task/#6088_ffmpeg_api_deprecation_warnings' into 'master'
#6088 ffmpeg api deprecation warnings

Closes #6088

See merge request OpenMW/openmw!1504
-rw-r--r--  extern/osg-ffmpeg-videoplayer/videostate.cpp  106
-rw-r--r--  extern/osg-ffmpeg-videoplayer/videostate.hpp  24
2 files changed, 69 insertions, 61 deletions
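
What the merge does, in short: libavcodec deprecated av_init_packet() and the AVPacketList type, so the video player now heap-allocates packets with av_packet_alloc() and keeps queue nodes in its own PacketList struct (an AVPacket* plus a next pointer, see videostate.hpp below). A minimal sketch of the allocation pattern this implies, assuming FFmpeg 4.4+ headers; AVPacketDeleter is an illustrative name, not an identifier from the diff:

// Sketch only: the deprecated stack-packet pattern versus the
// heap-allocated pattern adopted by this merge.
extern "C"
{
#include <libavcodec/avcodec.h>
}

#include <memory>
#include <new>

struct AVPacketDeleter
{
    void operator()(AVPacket* pkt) const
    {
        av_packet_free(&pkt);   // releases the payload and the AVPacket itself
    }
};

int main()
{
    // Deprecated pattern the diff removes:
    //   AVPacket pkt;
    //   av_init_packet(&pkt);
    //
    // Replacement: allocate the packet on the heap and manage it with RAII.
    std::unique_ptr<AVPacket, AVPacketDeleter> packet(av_packet_alloc());
    if (!packet)
        throw std::bad_alloc();

    // ... hand packet.get() to av_read_frame()/avcodec_send_packet() as before ...
    return 0;
}

The queue code follows the same idea: instead of copying a whole AVPacket into a list node, each node now owns an AVPacket* that is moved into with av_packet_move_ref() and released with av_packet_free().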
diff --git a/extern/osg-ffmpeg-videoplayer/videostate.cpp b/extern/osg-ffmpeg-videoplayer/videostate.cpp
index 7fcad33697..ff1d9c6517 100644
--- a/extern/osg-ffmpeg-videoplayer/videostate.cpp
+++ b/extern/osg-ffmpeg-videoplayer/videostate.cpp
@@ -4,10 +4,12 @@
#include <cassert>
#include <cstddef>
#include <iostream>
+#include <memory>
#include <thread>
#include <chrono>
#include <osg/Texture2D>
+#include <utility>
#if defined(_MSC_VER)
#pragma warning (push)
@@ -62,20 +64,20 @@ namespace
av_frame_free(&frame);
}
};
-
- template<class T>
- struct AVFree
- {
- void operator()(T* frame) const
- {
- av_free(&frame);
- }
- };
}
namespace Video
{
+struct PacketListFree
+{
+ void operator()(Video::PacketList* list) const
+ {
+ av_packet_free(&list->pkt);
+ av_free(&list);
+ }
+};
+
VideoState::VideoState()
: mAudioFactory(nullptr)
, format_ctx(nullptr)
@@ -95,7 +97,7 @@ VideoState::VideoState()
{
mFlushPktData = flush_pkt.data;
-// This is not needed anymore above FFMpeg version 4.0
+// This is not needed any more above FFMpeg version 4.0
#if LIBAVCODEC_VERSION_INT < 3805796
av_register_all();
#endif
@@ -114,25 +116,29 @@ void VideoState::setAudioFactory(MovieAudioFactory *factory)
void PacketQueue::put(AVPacket *pkt)
{
- std::unique_ptr<AVPacketList, AVFree<AVPacketList>> pkt1(static_cast<AVPacketList*>(av_malloc(sizeof(AVPacketList))));
+ std::unique_ptr<PacketList, PacketListFree> pkt1(static_cast<PacketList*>(av_malloc(sizeof(PacketList))));
if(!pkt1) throw std::bad_alloc();
+
if(pkt == &flush_pkt)
- pkt1->pkt = *pkt;
+ pkt1->pkt = pkt;
else
- av_packet_move_ref(&pkt1->pkt, pkt);
+ {
+ pkt1->pkt = av_packet_alloc();
+ av_packet_move_ref(pkt1->pkt, pkt);
+ }
pkt1->next = nullptr;
std::lock_guard<std::mutex> lock(this->mutex);
- AVPacketList* ptr = pkt1.release();
+ PacketList* ptr = pkt1.release();
if(!last_pkt)
this->first_pkt = ptr;
else
this->last_pkt->next = ptr;
this->last_pkt = ptr;
this->nb_packets++;
- this->size += ptr->pkt.size;
+ this->size += ptr->pkt->size;
this->cond.notify_one();
}
@@ -141,17 +147,17 @@ int PacketQueue::get(AVPacket *pkt, VideoState *is)
std::unique_lock<std::mutex> lock(this->mutex);
while(!is->mQuit)
{
- AVPacketList *pkt1 = this->first_pkt;
+ PacketList *pkt1 = this->first_pkt;
if(pkt1)
{
this->first_pkt = pkt1->next;
if(!this->first_pkt)
this->last_pkt = nullptr;
this->nb_packets--;
- this->size -= pkt1->pkt.size;
+ this->size -= pkt1->pkt->size;
av_packet_unref(pkt);
- av_packet_move_ref(pkt, &pkt1->pkt);
+ av_packet_move_ref(pkt, pkt1->pkt);
av_free(pkt1);
return 1;
@@ -173,14 +179,14 @@ void PacketQueue::flush()
void PacketQueue::clear()
{
- AVPacketList *pkt, *pkt1;
+ PacketList *pkt, *pkt1;
std::lock_guard<std::mutex> lock(this->mutex);
for(pkt = this->first_pkt; pkt != nullptr; pkt = pkt1)
{
pkt1 = pkt->next;
- if (pkt->pkt.data != flush_pkt.data)
- av_packet_unref(&pkt->pkt);
+ if (pkt->pkt->data != flush_pkt.data)
+ av_packet_unref(pkt->pkt);
av_freep(&pkt);
}
this->last_pkt = nullptr;
@@ -304,7 +310,7 @@ void VideoState::video_refresh()
VideoPicture* vp = &this->pictq[this->pictq_rindex];
this->video_display(vp);
- this->pictq_rindex = (pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
+ this->pictq_rindex = (pictq_rindex+1) % pictq.size();
this->frame_last_pts = vp->pts;
this->pictq_size--;
this->pictq_cond.notify_one();
@@ -321,12 +327,12 @@ void VideoState::video_refresh()
for (; i<this->pictq_size-1; ++i)
{
if (this->pictq[pictq_rindex].pts + threshold <= this->get_master_clock())
- this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE; // not enough time to show this picture
+ this->pictq_rindex = (this->pictq_rindex+1) % pictq.size(); // not enough time to show this picture
else
break;
}
- assert (this->pictq_rindex < VIDEO_PICTURE_ARRAY_SIZE);
+ assert (this->pictq_rindex < pictq.size());
VideoPicture* vp = &this->pictq[this->pictq_rindex];
this->video_display(vp);
@@ -336,7 +342,7 @@ void VideoState::video_refresh()
this->pictq_size -= i;
// update queue for next picture
this->pictq_size--;
- this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
+ this->pictq_rindex = (this->pictq_rindex+1) % pictq.size();
this->pictq_cond.notify_one();
}
}
@@ -386,7 +392,7 @@ int VideoState::queue_picture(const AVFrame &pFrame, double pts)
0, this->video_ctx->height, vp->rgbaFrame->data, vp->rgbaFrame->linesize);
// now we inform our display thread that we have a pic ready
- this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_ARRAY_SIZE;
+ this->pictq_windex = (this->pictq_windex+1) % pictq.size();
this->pictq_size++;
return 0;
@@ -415,7 +421,7 @@ double VideoState::synchronize_video(const AVFrame &src_frame, double pts)
class VideoThread
{
public:
- VideoThread(VideoState* self)
+ explicit VideoThread(VideoState* self)
: mVideoState(self)
, mThread([this]
{
@@ -439,9 +445,8 @@ public:
void run()
{
VideoState* self = mVideoState;
- AVPacket packetData;
- av_init_packet(&packetData);
- std::unique_ptr<AVPacket, AVPacketUnref> packet(&packetData);
+ AVPacket* packetData = av_packet_alloc();
+ std::unique_ptr<AVPacket, AVPacketUnref> packet(packetData);
std::unique_ptr<AVFrame, AVFrameFree> pFrame{av_frame_alloc()};
while(self->videoq.get(packet.get(), self) >= 0)
@@ -491,7 +496,7 @@ private:
class ParseThread
{
public:
- ParseThread(VideoState* self)
+ explicit ParseThread(VideoState* self)
: mVideoState(self)
, mThread([this] { run(); })
{
@@ -507,9 +512,8 @@ public:
VideoState* self = mVideoState;
AVFormatContext *pFormatCtx = self->format_ctx;
- AVPacket packetData;
- av_init_packet(&packetData);
- std::unique_ptr<AVPacket, AVPacketUnref> packet(&packetData);
+ AVPacket* packetData = av_packet_alloc();
+ std::unique_ptr<AVPacket, AVPacketUnref> packet(packetData);
try
{
@@ -632,13 +636,11 @@ bool VideoState::update()
int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
{
- const AVCodec *codec;
-
if(stream_index < 0 || stream_index >= static_cast<int>(pFormatCtx->nb_streams))
return -1;
// Get a pointer to the codec context for the video stream
- codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codecpar->codec_id);
+ const AVCodec *codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codecpar->codec_id);
if(!codec)
{
fprintf(stderr, "Unsupported codec!\n");
@@ -654,7 +656,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
this->audio_ctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(this->audio_ctx, pFormatCtx->streams[stream_index]->codecpar);
-// This is not needed anymore above FFMpeg version 4.0
+// This is not needed any more above FFMpeg version 4.0
#if LIBAVCODEC_VERSION_INT < 3805796
av_codec_set_pkt_timebase(this->audio_ctx, pFormatCtx->streams[stream_index]->time_base);
#endif
@@ -674,7 +676,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
}
mAudioDecoder = mAudioFactory->createDecoder(this);
- if (!mAudioDecoder.get())
+ if (!mAudioDecoder)
{
std::cerr << "Failed to create audio decoder, can not play audio stream" << std::endl;
avcodec_free_context(&this->audio_ctx);
@@ -691,7 +693,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
this->video_ctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(this->video_ctx, pFormatCtx->streams[stream_index]->codecpar);
-// This is not needed anymore above FFMpeg version 4.0
+// This is not needed any more above FFMpeg version 4.0
#if LIBAVCODEC_VERSION_INT < 3805796
av_codec_set_pkt_timebase(this->video_ctx, pFormatCtx->streams[stream_index]->time_base);
#endif
@@ -702,7 +704,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
return -1;
}
- this->video_thread.reset(new VideoThread(this));
+ this->video_thread = std::make_unique<VideoThread>(this);
break;
default:
@@ -721,8 +723,8 @@ void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::stri
this->av_sync_type = AV_SYNC_DEFAULT;
this->mQuit = false;
- this->stream = inputstream;
- if(!this->stream.get())
+ this->stream = std::move(inputstream);
+ if(!this->stream)
throw std::runtime_error("Failed to open video resource");
AVIOContext *ioCtx = avio_alloc_context(nullptr, 0, 0, this, istream_read, istream_write, istream_seek);
@@ -789,7 +791,7 @@ void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::stri
}
- this->parse_thread.reset(new ParseThread(this));
+ this->parse_thread = std::make_unique<ParseThread>(this);
}
void VideoState::deinit()
@@ -801,11 +803,11 @@ void VideoState::deinit()
mAudioDecoder.reset();
- if (this->parse_thread.get())
+ if (this->parse_thread)
{
this->parse_thread.reset();
}
- if (this->video_thread.get())
+ if (this->video_thread)
{
this->video_thread.reset();
}
@@ -850,9 +852,9 @@ void VideoState::deinit()
mTexture = nullptr;
}
- // Dellocate RGBA frame queue.
- for (std::size_t i = 0; i < VIDEO_PICTURE_ARRAY_SIZE; ++i)
- this->pictq[i].rgbaFrame = nullptr;
+ // Deallocate RGBA frame queue.
+ for (auto & i : this->pictq)
+ i.rgbaFrame = nullptr;
}
@@ -870,14 +872,14 @@ double VideoState::get_master_clock()
return this->get_external_clock();
}
-double VideoState::get_video_clock()
+double VideoState::get_video_clock() const
{
return this->frame_last_pts;
}
double VideoState::get_audio_clock()
{
- if (!mAudioDecoder.get())
+ if (!mAudioDecoder)
return 0.0;
return mAudioDecoder->getAudioClock();
}
@@ -896,7 +898,7 @@ void VideoState::seekTo(double time)
mSeekRequested = true;
}
-double VideoState::getDuration()
+double VideoState::getDuration() const
{
return this->format_ctx->duration / 1000000.0;
}
diff --git a/extern/osg-ffmpeg-videoplayer/videostate.hpp b/extern/osg-ffmpeg-videoplayer/videostate.hpp
index 32a772f299..d1592bd910 100644
--- a/extern/osg-ffmpeg-videoplayer/videostate.hpp
+++ b/extern/osg-ffmpeg-videoplayer/videostate.hpp
@@ -1,8 +1,9 @@
#ifndef VIDEOPLAYER_VIDEOSTATE_H
#define VIDEOPLAYER_VIDEOSTATE_H
-#include <stdint.h>
+#include <cstdint>
#include <atomic>
+#include <array>
#include <vector>
#include <memory>
#include <string>
@@ -40,13 +41,10 @@ extern "C"
#include "videodefs.hpp"
#define VIDEO_PICTURE_QUEUE_SIZE 50
-// allocate one extra to make sure we do not overwrite the osg::Image currently set on the texture
-#define VIDEO_PICTURE_ARRAY_SIZE (VIDEO_PICTURE_QUEUE_SIZE+1)
extern "C"
{
struct SwsContext;
- struct AVPacketList;
struct AVPacket;
struct AVFormatContext;
struct AVStream;
@@ -78,6 +76,13 @@ struct ExternalClock
void set(uint64_t time);
};
+class PacketList
+{
+public:
+ AVPacket* pkt = nullptr;
+ PacketList *next = nullptr;
+};
+
struct PacketQueue {
PacketQueue()
: first_pkt(nullptr), last_pkt(nullptr), flushing(false), nb_packets(0), size(0)
@@ -85,7 +90,7 @@ struct PacketQueue {
~PacketQueue()
{ clear(); }
- AVPacketList *first_pkt, *last_pkt;
+ PacketList *first_pkt, *last_pkt;
std::atomic<bool> flushing;
std::atomic<int> nb_packets;
std::atomic<int> size;
@@ -129,7 +134,7 @@ struct VideoState {
void setPaused(bool isPaused);
void seekTo(double time);
- double getDuration();
+ double getDuration() const;
int stream_open(int stream_index, AVFormatContext *pFormatCtx);
@@ -145,7 +150,7 @@ struct VideoState {
double synchronize_video(const AVFrame &src_frame, double pts);
double get_audio_clock();
- double get_video_clock();
+ double get_video_clock() const;
double get_external_clock();
double get_master_clock();
@@ -178,8 +183,9 @@ struct VideoState {
PacketQueue videoq;
SwsContext* sws_context;
int sws_context_w, sws_context_h;
- VideoPicture pictq[VIDEO_PICTURE_ARRAY_SIZE];
- int pictq_size, pictq_rindex, pictq_windex;
+ std::array<VideoPicture, VIDEO_PICTURE_QUEUE_SIZE+1> pictq; // allocate one extra to make sure we do not overwrite the osg::Image currently set on the texture
+ int pictq_size;
+ unsigned long pictq_rindex, pictq_windex;
std::mutex pictq_mutex;
std::condition_variable pictq_cond;
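
The other recurring change above is dropping the VIDEO_PICTURE_ARRAY_SIZE macro and C array in favour of std::array, so the ring-buffer indices wrap on pictq.size(). A minimal sketch of that pattern, with VideoPicture reduced to a stub rather than the real OpenMW struct:

#include <array>
#include <cstddef>

struct VideoPicture { double pts = 0.0; };   // stub standing in for the real struct

constexpr std::size_t VIDEO_PICTURE_QUEUE_SIZE = 50;

struct PictureQueue
{
    // one extra slot so the picture currently shown on the texture is never overwritten
    std::array<VideoPicture, VIDEO_PICTURE_QUEUE_SIZE + 1> pictq;
    std::size_t rindex = 0, windex = 0;

    void advanceRead()  { rindex = (rindex + 1) % pictq.size(); }   // was % VIDEO_PICTURE_ARRAY_SIZE
    void advanceWrite() { windex = (windex + 1) % pictq.size(); }
};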