├── .gitignore
├── qml.qrc
├── openglinterop.h
├── videosource.cpp
├── openglinterop.cpp
├── hwdecoderfactory.h
├── vaapidecoder.h
├── d3d9decoder.h
├── videosource.h
├── d3d9decoder.cpp
├── surface.h
├── hwdecoderfactory.cpp
├── videoframe.h
├── framerenderer.h
├── videorenderer.h
├── surfaced3d9.h
├── vaapidecoder.cpp
├── README.md
├── main.cpp
├── surface.cpp
├── videoframe.cpp
├── surfacevaapi.h
├── fileprocessor.h
├── main.qml
├── hwdecoder.h
├── d3d9interop.h
├── videorenderer.cpp
├── HWDecoding.pro
├── d3d9interop.cpp
├── fileprocessor.cpp
├── framerenderer.cpp
├── surfacevaapi.cpp
├── yuv2rgb.h
├── hwdecoder.cpp
├── surfaced3d9.cpp
└── yuv2rgb.cpp
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pro.user
2 | *.pri.user
3 |
--------------------------------------------------------------------------------
/qml.qrc:
--------------------------------------------------------------------------------
1 | <RCC>
2 |     <qresource prefix="/">
3 |         <file>main.qml</file>
4 |     </qresource>
5 | </RCC>
6 |
--------------------------------------------------------------------------------
/openglinterop.h:
--------------------------------------------------------------------------------
1 | #ifndef OPENGLINTEROP_H
2 | #define OPENGLINTEROP_H
3 |
// Platform-specific OpenGL interop bootstrap.
// initialize() is called once from main() on the GUI thread; on Windows it
// forces creation of the D3D9Interop singleton (which probes the
// WGL_NV_DX_interop extension), on other platforms it is a no-op
// (see openglinterop.cpp).
4 | class OpenGLInterop
5 | {
6 | public:
7 | static void initialize();
8 | };
9 |
10 | #endif // OPENGLINTEROP_H
11 |
--------------------------------------------------------------------------------
/videosource.cpp:
--------------------------------------------------------------------------------
1 | #include "videosource.h"
2 |
3 | VideoSource::VideoSource(QObject * parent) : QObject(parent)
4 | {
5 | }
6 |
7 | void VideoSource::setFrame(VideoFramePtr frame)
8 | {
9 | Q_EMIT frameReady(frame);
10 | }
11 |
--------------------------------------------------------------------------------
/openglinterop.cpp:
--------------------------------------------------------------------------------
1 | #include "openglinterop.h"
2 | #include
3 |
4 | #if defined(Q_OS_WIN)
5 | #include "d3d9interop.h"
6 | #endif
7 |
// Warm up platform interop state. Must run on the GUI thread (see the call
// site in main.cpp): on Windows the first D3D9Interop::instance() call
// creates a temporary GL context to resolve the WGL_NV_DX_interop entry
// points; elsewhere nothing is required.
8 | void OpenGLInterop::initialize()
9 | {
10 | #if defined(Q_OS_WIN)
11 | D3D9Interop::instance();
12 | #endif
13 | }
14 |
--------------------------------------------------------------------------------
/hwdecoderfactory.h:
--------------------------------------------------------------------------------
1 | #ifndef HWDECODERFACTORY_H
2 | #define HWDECODERFACTORY_H
3 |
// NOTE(review): the bracketed header name on the next line was lost during
// extraction (presumably <QObject>) — restore when fixing up this dump.
4 | #include
5 | #include "hwdecoder.h"
6 |
// Chooses the hardware decoder backend for the current platform
// (D3D9/DXVA2 on Windows, VA-API on desktop Linux — see
// hwdecoderfactory.cpp). The returned decoder is owned by `parent`
// via the usual QObject parent/child mechanism.
7 | class HWDecoderFactory
8 | {
9 | public:
10 | static HWDecoder *createDecoder(QObject * parent = nullptr);
11 |
12 | };
13 |
14 | #endif // HWDECODERFACTORY_H
15 |
--------------------------------------------------------------------------------
/vaapidecoder.h:
--------------------------------------------------------------------------------
1 | #ifndef VAAPIDECODER_H
2 | #define VAAPIDECODER_H
3 |
4 | #include "hwdecoder.h"
5 |
// VA-API-backed hardware decoder (desktop Linux). The constructor selects the
// "vaapi" FFmpeg device; createHWVideoFrame wraps decoded frames into
// SurfaceVAAPI objects (see vaapidecoder.cpp).
6 | class VAAPIDecoder: public HWDecoder
7 | {
8 | public:
9 | VAAPIDecoder(QObject * parent = nullptr);
10 |
11 | // HWDecoder interface
12 | private:
// Wraps the VASurfaceID carried in frame->data[3] into a VideoFrame.
13 | virtual VideoFrame* createHWVideoFrame(const AVFrame *frame) override;
14 | };
15 |
16 | #endif // VAAPIDECODER_H
17 |
--------------------------------------------------------------------------------
/d3d9decoder.h:
--------------------------------------------------------------------------------
1 | #ifndef HWWINDOWSDECODER_H
2 | #define HWWINDOWSDECODER_H
3 |
4 | #include "hwdecoder.h"
5 |
6 | class D3D9Decoder: public HWDecoder
7 | {
8 | public:
9 | D3D9Decoder(QObject * parent = nullptr);
10 |
11 | // HWDecoder interface
12 | private:
13 | virtual VideoFrame* createHWVideoFrame(const AVFrame *frame) override;
14 | };
15 |
16 | #endif // HWWINDOWSDECODER_H
17 |
--------------------------------------------------------------------------------
/videosource.h:
--------------------------------------------------------------------------------
1 | #ifndef VIDEOSOURCE_H
2 | #define VIDEOSOURCE_H
3 |
// NOTE(review): the bracketed header name on the next line was lost during
// extraction (presumably <QObject>) — restore when fixing up this dump.
4 | #include
5 | #include "videoframe.h"
6 |
// Relay object between the decoder and the renderer: the decoder feeds
// frames into the setFrame() slot, consumers subscribe to frameReady().
// VideoFramePtr is a registered meta-type, so the connection also works
// queued across threads (see VideoFrame::registerMetaType).
7 | class VideoSource: public QObject
8 | {
9 | Q_OBJECT
10 | public:
11 | VideoSource(QObject * parent = nullptr);
12 |
13 | public Q_SLOTS:
14 | void setFrame(VideoFramePtr frame);
15 |
16 | Q_SIGNALS:
17 | void frameReady(VideoFramePtr frame);
18 |
19 | };
20 |
21 | #endif // VIDEOSOURCE_H
22 |
--------------------------------------------------------------------------------
/d3d9decoder.cpp:
--------------------------------------------------------------------------------
1 | #include "d3d9decoder.h"
2 | #include "surfaced3d9.h"
3 |
4 |
5 | D3D9Decoder::D3D9Decoder(QObject * parent) : HWDecoder(parent)
6 | {
7 | m_deviceName = "dxva2";
8 | m_hwPixFmt = AV_PIX_FMT_DXVA2_VLD;
9 | }
10 |
11 | VideoFrame* D3D9Decoder::createHWVideoFrame(const AVFrame *frame)
12 | {
13 | IDirect3DSurface9 *d3d9surface = (IDirect3DSurface9*)frame->data[3];
14 | SurfaceD3D9* videoSurface = new SurfaceD3D9(d3d9surface, frame->width, frame->height);
15 | return new VideoFrame(videoSurface);
16 | }
17 |
--------------------------------------------------------------------------------
/surface.h:
--------------------------------------------------------------------------------
1 | #ifndef SURFACE_H
2 | #define SURFACE_H
3 |
// NOTE(review): the bracketed header names below were lost during extraction
// (presumably <QObject> and <QOpenGLFunctions>) — restore when fixing up.
4 | #include
5 | #include
6 |
// Base class for a per-frame pixel holder. The default map() uploads the CPU
// copy in m_rgbData into a GL_TEXTURE_2D via glTexImage2D; hardware-specific
// subclasses (SurfaceD3D9, SurfaceVAAPI) override map()/unmap() for
// zero-copy texture sharing.
7 | class Surface: public QObject
8 | {
9 | Q_OBJECT
10 | public:
11 | Surface(int width, int height, QObject * parent = nullptr);
12 | virtual ~Surface();
13 |
// Bind the frame's pixels to the GL texture `name`; needs a current context.
14 | virtual bool map(GLuint name);
15 | virtual bool unmap();
16 |
17 | virtual int width();
18 | virtual int height();
19 |
20 | protected:
21 | void initGLFunctions();
22 |
23 | int m_width;
24 | int m_height;
25 | QOpenGLFunctions m_glFunctions;
// CPU-side RGB pixels used by the software fallback path.
26 | QByteArray m_rgbData;
27 | };
28 |
29 | #endif // SURFACE_H
30 |
--------------------------------------------------------------------------------
/hwdecoderfactory.cpp:
--------------------------------------------------------------------------------
1 | #include "hwdecoderfactory.h"
2 |
3 | #if defined(Q_OS_WIN)
4 | #include "d3d9decoder.h"
5 | #endif
6 |
7 | #if defined(Q_OS_LINUX) && !defined(Q_OS_ANDROID)
8 | #include "vaapidecoder.h"
9 | #endif
10 |
11 | #if defined(Q_OS_LINUX) && !defined(Q_OS_ANDROID)
12 | #include "vaapidecoder.h"
13 | #endif
14 |
15 | HWDecoder *HWDecoderFactory::createDecoder(QObject *parent)
16 | {
17 | #if defined(Q_OS_WIN)
18 | return new D3D9Decoder(parent);
19 | #endif
20 |
21 | #if defined(Q_OS_LINUX) && !defined(Q_OS_ANDROID)
22 | return new VAAPIDecoder(parent);
23 | #endif
24 |
25 | #if defined(Q_OS_LINUX) && !defined(Q_OS_ANDROID)
26 | return new VAAPIDecoder(parent);
27 | #endif
28 | }
29 |
--------------------------------------------------------------------------------
/videoframe.h:
--------------------------------------------------------------------------------
1 | #ifndef VIDEOFRAME_H
2 | #define VIDEOFRAME_H
3 |
4 | #include
5 | #include
6 | #include "surface.h"
7 |
8 | class VideoFrame: public QObject
9 | {
10 | Q_OBJECT
11 | public:
12 | VideoFrame(QObject * parent = nullptr);
13 | VideoFrame(Surface * surface, QObject * parent = nullptr);
14 | ~VideoFrame();
15 |
16 | bool map(GLuint name);
17 | void unmap();
18 |
19 | int width();
20 | int height();
21 |
22 | private:
23 | Surface * m_surface;
24 |
25 | void registerMetaType();
26 | };
27 |
28 | typedef QSharedPointer VideoFramePtr;
29 | Q_DECLARE_METATYPE(VideoFramePtr)
30 |
31 | #endif // VIDEOFRAME_H
32 |
--------------------------------------------------------------------------------
/framerenderer.h:
--------------------------------------------------------------------------------
1 | #ifndef FRAMERENDERER_H
2 | #define FRAMERENDERER_H
3 |
// NOTE(review): the bracketed header names below were lost during extraction
// (QOpenGLFunctions / QOpenGLShaderProgram / QVector are plausible) — restore
// when fixing up this dump.
4 | #include
5 | #include
6 | #include
7 |
8 | #include "videoframe.h"
9 |
// Render-thread helper that draws the current VideoFrame with a shader
// program. Owned by the QQuickFramebufferObject renderer (videorenderer.cpp):
// initialize() then render() are called with a current GL context, and
// setFrame() is fed from synchronize().
10 | class FrameRenderer: public QOpenGLFunctions
11 | {
12 | public:
13 | FrameRenderer();
14 | ~FrameRenderer();
15 |
16 | void initialize();
17 | void render();
18 | void setFrame(VideoFramePtr frame);
19 |
20 | private:
21 | void initShaderProgram();
22 | void initGeometry();
23 |
// NOTE(review): the QVector element types were stripped by extraction —
// likely QVector2D/QVector3D vertex data; confirm against framerenderer.cpp.
24 | QVector m_vertices;
25 | QVector m_normals;
26 | QOpenGLShaderProgram m_shaderProgram;
// Shader attribute locations: position and texture coordinate.
27 | int m_in_pos;
28 | int m_in_tc;
29 |
30 | VideoFramePtr m_frame;
31 | GLuint m_texture;
32 | };
33 |
34 | #endif // FRAMERENDERER_H
35 |
--------------------------------------------------------------------------------
/videorenderer.h:
--------------------------------------------------------------------------------
1 | #ifndef VIDEORENDERER_H
2 | #define VIDEORENDERER_H
3 |
// NOTE(review): bracketed header name lost in extraction (presumably
// <QQuickFramebufferObject>) — restore when fixing up this dump.
4 | #include
5 | #include "videosource.h"
6 |
// QML item ("VideoRenderer" in module VideoHW) that paints decoded frames
// through a QQuickFramebufferObject. It listens to a VideoSource, keeps the
// most recent frame, and hands it to the render-thread renderer during
// synchronize() (see videorenderer.cpp).
7 | class VideoFBORenderer : public QQuickFramebufferObject
8 | {
9 | Q_OBJECT
10 | Q_PROPERTY(VideoSource* source READ source WRITE setSource NOTIFY sourceChanged)
11 | public:
12 | VideoFBORenderer(QQuickItem *parent = nullptr);
13 | Renderer *createRenderer() const;
14 |
15 | void setSource(VideoSource * source);
16 | VideoSource *source() const;
17 |
// Latest frame received from the source; read by the renderer's synchronize().
18 | VideoFramePtr frame() const;
19 |
20 | Q_SIGNALS:
21 | void sourceChanged();
22 |
23 | private Q_SLOTS:
24 | void onFrameReady(VideoFramePtr frame);
25 |
26 | private:
27 | VideoSource * m_source;
28 | VideoFramePtr m_frame;
29 | };
30 |
31 | #endif // VIDEORENDERER_H
32 |
--------------------------------------------------------------------------------
/surfaced3d9.h:
--------------------------------------------------------------------------------
1 | #ifndef SURFACED3D9_H
2 | #define SURFACED3D9_H
3 |
// NOTE(review): bracketed header name lost in extraction (presumably
// <d3d9.h>) — restore when fixing up this dump.
4 | #include "surface.h"
5 | #include
6 |
// Windows zero-copy surface: wraps the IDirect3DSurface9 produced by DXVA2
// decoding and shares it with OpenGL via the NV/DX interop handles below
// (see D3D9Interop). Falls back to CPU extraction when interop is not
// available — presumably via extractSurfaceData()/cropImage(); confirm in
// surfaced3d9.cpp.
7 | class SurfaceD3D9: public Surface
8 | {
9 | public:
10 | SurfaceD3D9(IDirect3DSurface9 * surface, int width, int height);
11 | ~SurfaceD3D9();
12 |
13 | // Surface interface
14 | virtual bool map(GLuint name) override;
15 | virtual bool unmap() override;
16 |
17 | private:
// Reads the surface pixels back to system memory (software path).
18 | void extractSurfaceData();
19 | QByteArray cropImage(D3DLOCKED_RECT & lockedRect);
20 |
21 | IDirect3DDevice9 *m_device;
22 | IDirect3DTexture9 *m_texture;
23 | IDirect3DSurface9 *m_surface;
24 | IDirect3DTexture9 *m_origTexture;
25 | IDirect3DSurface9 *m_origSurface;
26 | D3DSURFACE_DESC m_surfaceDesc;
27 |
28 | //NV/DX Interop Handles
29 | HANDLE m_shareHandle;
30 | HANDLE gl_handleD3D;
31 | HANDLE gl_handle;
32 | };
33 |
34 | #endif // SURFACED3D9_H
35 |
--------------------------------------------------------------------------------
/vaapidecoder.cpp:
--------------------------------------------------------------------------------
1 | #include "vaapidecoder.h"
2 | #include "surfacevaapi.h"
3 |
4 | extern "C" {
5 | #include "libavutil/hwcontext_vaapi.h"
6 | }
7 |
8 | VAAPIDecoder::VAAPIDecoder(QObject * parent) : HWDecoder(parent)
9 | {
10 | m_deviceName = "vaapi";
11 | m_hwPixFmt = AV_PIX_FMT_VAAPI_VLD;
12 | }
13 |
14 | VideoFrame* VAAPIDecoder::createHWVideoFrame(const AVFrame *frame)
15 | {
16 | //Get VAAPI Display from FFMpeg hw frame context
17 | AVHWFramesContext* hwframesCtx = (AVHWFramesContext*)frame->hw_frames_ctx->data;
18 | AVVAAPIDeviceContext* vaapiDeviceContext = (AVVAAPIDeviceContext*)hwframesCtx->device_ctx->hwctx;
19 | VADisplay display = vaapiDeviceContext->display;
20 |
21 | VASurfaceID surface = (VASurfaceID)(uintptr_t)frame->data[3];
22 |
23 | SurfaceVAAPI* vaapiSurface = new SurfaceVAAPI(display, surface, frame->width, frame->height);
24 |
25 | return new VideoFrame(vaapiSurface);
26 | }
27 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Video Hardware Accelerated
2 | Example of using FFmpeg for decoding, encoding and rendering with HW acceleration.
3 |
4 | ### Windows Build
5 | #### Needs:
6 | * MS Visual Studio 2015/2017
7 | * Qt 5.7 or greater
8 | * FFmpeg (tested with n3.4.2)
9 | * Msys64
10 | * Mingw
11 |
12 | #### Steps:
13 | 1. Download MS Visual Studio 2015/2017
14 | 2. Download & install Qt for Windows and msvc kit
15 | 3. Download Msys64
16 | 4. Setup Msys64 with developer tools
17 | 5. Download & build FFmpeg
18 |
19 | ```shell
20 | ./configure --prefix={Your/FFmpeg/build/Path} --disable-doc --disable-debug --enable-shared --disable-static --enable-runtime-cpudetect --disable-postproc --toolchain=msvc --enable-pic --extra-cflags="-MD" --extra-libs=user32.lib --disable-avresample --enable-hwaccels --enable-dxva2 --enable-libmfx --enable-nonfree --enable-gpl --enable-libx264
21 | ```
22 |
23 | ```shell
24 | make -j4 install
25 | ```
26 |
27 | 6. Put your FFmpeg build into your Qt dir
28 |
--------------------------------------------------------------------------------
/main.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include "fileprocessor.h"
4 | #include "videorenderer.h"
5 | #include "openglinterop.h"
6 |
7 | int main(int argc, char *argv[])
8 | {
9 |
10 | #if defined(Q_OS_WIN)
11 | QCoreApplication::setAttribute(Qt::AA_EnableHighDpiScaling);
12 | #endif
13 |
14 | QGuiApplication app(argc, argv);
15 |
16 | //Fnadales: Needed to check OpenGLExtensions on main Thread
17 | OpenGLInterop::initialize();
18 |
19 | QQmlApplicationEngine engine;
20 | qmlRegisterUncreatableType("VideoHW", 0, 1, "VideoSource", "C++ Created");
21 | qmlRegisterSingletonType("VideoHW", 0, 1, "FileProcessor", FileProcessorInstance);
22 | qmlRegisterType("VideoHW", 0, 1, "VideoRenderer");
23 |
24 | engine.load(QUrl(QStringLiteral("qrc:/main.qml")));
25 | if (engine.rootObjects().isEmpty())
26 | return -1;
27 |
28 | return app.exec();
29 | }
30 |
--------------------------------------------------------------------------------
/surface.cpp:
--------------------------------------------------------------------------------
1 | #include "surface.h"
2 |
3 | Surface::Surface(int width, int height, QObject * parent)
4 | : QObject(parent),
5 | m_width(width),
6 | m_height(height)
7 | {
8 |
9 | }
10 |
11 | Surface::~Surface() {
12 |
13 | }
14 |
15 | bool Surface::map(GLuint name) {
16 | initGLFunctions();
17 |
18 | //TODO: map Data to 2D Texture
19 | m_glFunctions.glBindTexture(GL_TEXTURE_2D, name);
20 | m_glFunctions.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_width, m_height, 0, GL_RGB,
21 | GL_UNSIGNED_BYTE, m_rgbData.data());
22 |
23 | return true;
24 | }
25 |
26 | bool Surface::unmap(){
27 | m_glFunctions.glBindTexture(GL_TEXTURE_2D, 0);
28 | return true;
29 | }
30 |
// Frame width in pixels.
31 | int Surface::width() {
32 | return m_width;
33 | }
34 |
// Frame height in pixels.
35 | int Surface::height() {
36 | return m_height;
37 | }
38 |
39 | void Surface::initGLFunctions()
40 | {
41 | static bool glInitialized = false;
42 | if (!glInitialized)
43 | m_glFunctions.initializeOpenGLFunctions();
44 | }
45 |
--------------------------------------------------------------------------------
/videoframe.cpp:
--------------------------------------------------------------------------------
1 | #include "videoframe.h"
2 |
3 | VideoFrame::VideoFrame(QObject *parent): QObject(parent), m_surface(nullptr)
4 | {
5 | registerMetaType();
6 | }
7 |
8 | VideoFrame::VideoFrame(Surface * surface, QObject * parent) : VideoFrame(parent)
9 | {
10 | m_surface = surface;
11 | }
12 |
13 | VideoFrame::~VideoFrame()
14 | {
15 | if (m_surface)
16 | delete m_surface;
17 | }
18 |
19 | void VideoFrame::registerMetaType()
20 | {
21 | static bool registered = false;
22 | if (!registered) {
23 | qRegisterMetaType("VideoFramePtr");
24 | registered = true;
25 | }
26 | }
27 |
28 | bool VideoFrame::map(GLuint name)
29 | {
30 | if (!m_surface) return false;
31 | return m_surface->map(name);
32 | }
33 |
34 | void VideoFrame::unmap()
35 | {
36 | if (!m_surface) return;
37 | m_surface->unmap();
38 | }
39 |
40 | int VideoFrame::width()
41 | {
42 | if (!m_surface) return 0;
43 | return m_surface->width();
44 | }
45 |
46 | int VideoFrame::height()
47 | {
48 | if (!m_surface) return 0;
49 | return m_surface->height();
50 | }
51 |
--------------------------------------------------------------------------------
/surfacevaapi.h:
--------------------------------------------------------------------------------
1 | #ifndef SURFACEVAAPI_H
2 | #define SURFACEVAAPI_H
3 |
// NOTE(review): the bracketed header names below were lost during extraction
// (X11/GLX and libva headers are plausible) — restore when fixing up.
4 | #include "surface.h"
5 | #include
6 | #include
7 |
8 | extern "C" {
9 | #include "libavutil/hwcontext_vaapi.h"
10 | }
11 |
// Signatures of the GLX_EXT_texture_from_pixmap entry points resolved at
// runtime (glXBindTexImageEXT / glXReleaseTexImageEXT).
12 | typedef void (*t_glx_bind)(Display *, GLXDrawable, int , const int *);
13 | typedef void (*t_glx_release)(Display *, GLXDrawable, int);
14 |
// Linux zero-copy surface: wraps a VA-API surface and exposes it to OpenGL
// through an X11 pixmap + GLX texture-from-pixmap binding (implementation in
// surfacevaapi.cpp).
15 | class SurfaceVAAPI: public Surface
16 | {
17 | public:
18 | SurfaceVAAPI(VADisplay display, VASurfaceID surface, int width, int height, QObject * parent = nullptr);
19 | ~SurfaceVAAPI();
20 |
21 | // Surface interface
22 | public:
23 | bool map(GLuint name);
24 | bool unmap();
25 |
26 | private:
27 | bool ensureDisplay();
28 | bool ensurePixmap();
29 |
30 | //OpenGL X11 Extensions for ZeroCopy
31 | bool initGLXFunctions();
32 | void resetGLXFunctions();
33 | bool checkGLXFunctions();
34 |
35 | t_glx_bind glXBindTexImageEXT;
36 | t_glx_release glXReleaseTexImageEXT;
37 |
38 | GLXPixmap m_glxPixmap;
39 | GLXFBConfig m_config;
40 |
41 | //VAAPI instances
42 | VADisplay m_vaDisplay;
43 | VASurfaceID m_surface;
44 |
45 | //X11 instances
46 | Display *m_x11Display;
47 | Pixmap m_pixmap;
48 | };
49 |
50 | #endif // SURFACEVAAPI_H
51 |
--------------------------------------------------------------------------------
/fileprocessor.h:
--------------------------------------------------------------------------------
1 | #ifndef FILEPROCESSOR_H
2 | #define FILEPROCESSOR_H
3 |
// NOTE(review): the bracketed header names below were lost during extraction
// (<QObject>/<QMutex>/<QQmlEngine> are plausible given the types used) —
// restore when fixing up this dump.
4 | #include
5 | #include
6 |
7 | #include "videosource.h"
8 | #include "hwdecoderfactory.h"
9 |
// QML-facing singleton that drives the decode pipeline: owns the
// platform-specific HWDecoder and the VideoSource that republishes decoded
// frames (see fileprocessor.cpp). processMedia()/stopProcess() are invoked
// from main.qml.
10 | class FileProcessor : public QObject
11 | {
12 | Q_OBJECT
13 | Q_PROPERTY(VideoSource* source READ getSource WRITE setSource NOTIFY sourceChanged)
14 | Q_PROPERTY(bool processing READ processing WRITE setProcessing NOTIFY processingChanged)
15 | public:
16 | explicit FileProcessor(QObject *parent = nullptr);
17 | ~FileProcessor();
18 |
// Start decoding the media at `input`; stopProcess() aborts the decode loop.
19 | Q_INVOKABLE void processMedia(const QUrl & input);
20 | Q_INVOKABLE void stopProcess();
21 |
22 | VideoSource *getSource() const;
23 | void setSource(VideoSource *source);
24 |
25 | bool processing();
26 | void setProcessing(bool status);
27 |
28 | Q_SIGNALS:
29 | void sourceChanged();
30 | void processingChanged();
31 |
32 | private:
33 | void processFile(const QString & input);
34 | void decodeFile(const QString & input);
35 |
36 |
37 | HWDecoder* m_decoder;
38 | VideoSource* m_source;
39 |
// m_mutex presumably guards m_processing across threads — confirm against
// processing()/setProcessing() in fileprocessor.cpp.
40 | bool m_processing;
41 | QMutex m_mutex;
42 | };
43 |
// Factory callback handed to qmlRegisterSingletonType (see main.cpp); the
// QML engine takes ownership of the returned object.
44 | static QObject *FileProcessorInstance(QQmlEngine *engine, QJSEngine *scriptEngine)
45 | {
46 | Q_UNUSED(engine)
47 | Q_UNUSED(scriptEngine)
48 |
49 | return new FileProcessor();
50 | }
51 |
52 | #endif // FILEPROCESSOR_H
53 |
--------------------------------------------------------------------------------
/main.qml:
--------------------------------------------------------------------------------
1 | import QtQuick 2.7
2 | import QtQuick.Window 2.2
3 | import QtQuick.Controls 2.0
4 | import QtQuick.Layouts 1.3
5 | import QtQuick.Dialogs 1.2
6 |
7 | import VideoHW 0.1
8 |
// Minimal UI: a video view fed by the FileProcessor singleton, a file
// picker, and a Start/Stop toggle bound to FileProcessor.processing.
9 | Window {
10 | visible: true
11 | width: 640
12 | height: 480
13 |
14 | ColumnLayout {
15 | anchors.fill: parent
16 |
// C++ VideoFBORenderer (registered as "VideoRenderer" in main.cpp).
17 | VideoRenderer {
18 | id: renderer
19 | anchors.left: parent.left
20 | anchors.right: parent.right
21 | anchors.margins: 10
22 | Layout.fillHeight: true
23 |
24 | source: FileProcessor.source
25 | }
26 |
27 | RowLayout {
28 | anchors.left: parent.left
29 | anchors.right: parent.right
30 |
// Shows the chosen file URL (filled in by the FileDialog below).
31 | TextField {
32 | id:inputStr
33 | Layout.fillWidth: true
34 | }
35 | Button {
36 | id:inputBtn
37 | text: 'Open Video'
38 | onClicked: openDialog.open()
39 | }
40 | }
41 |
42 | Button {
43 | id: decodeBtn
44 | Layout.alignment: Qt.AlignHCenter
45 | text: FileProcessor.processing? 'Stop' : 'Start'
46 |
47 | onClicked: {
48 | if (FileProcessor.processing)
49 | FileProcessor.stopProcess();
50 | else
51 | FileProcessor.processMedia(openDialog.fileUrl);
52 | }
53 | }
54 | }
55 |
56 | FileDialog {
57 | id: openDialog
58 | selectFolder: false
59 | selectMultiple: false
60 | onAccepted: inputStr.text = openDialog.fileUrl
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/hwdecoder.h:
--------------------------------------------------------------------------------
1 | #ifndef HWDECODER_H
2 | #define HWDECODER_H
3 |
4 | #include "videoframe.h"
5 |
// NOTE(review): the bracketed FFmpeg header names below were lost during
// extraction (libavcodec/libavformat/libavutil headers, given the types
// used) — restore when fixing up this dump.
6 | extern "C" {
7 | #include
8 | #include
9 | #include
10 | }
11 |
// Base class for hardware decoders. Subclasses (D3D9Decoder, VAAPIDecoder)
// set m_deviceName/m_hwPixFmt in their constructors and implement
// createHWVideoFrame() to wrap the decoded AVFrame into a platform Surface.
// Decoded frames are published via the frameDecoded() signal.
12 | class HWDecoder: public QObject
13 | {
14 | Q_OBJECT
15 | public:
// get_format callback handed to FFmpeg to select the HW pixel format.
16 | static enum AVPixelFormat getFormat(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts);
17 |
18 | HWDecoder(QObject * parent = nullptr);
19 | ~HWDecoder();
20 |
21 | bool init(AVCodecParameters* codecParameters);
22 | bool open();
23 | void close();
24 | void flush();
25 |
26 | int decode(AVPacket *packet);
27 |
28 | Q_SIGNALS:
29 | void frameDecoded(VideoFramePtr frame);
30 |
31 | protected:
32 | QString m_deviceName;
// NOTE(review): static member shared by ALL decoder instances (needed by the
// static getFormat callback) — two live decoders would clobber each other.
33 | static AVPixelFormat m_hwPixFmt;
34 |
35 | private:
36 | int initHWContext(const enum AVHWDeviceType m_type);
37 | void sendFrame(VideoFrame * frame);
38 |
39 | virtual VideoFrame* createHWVideoFrame(const AVFrame * frame) = 0;
40 | VideoFrame* createSWVideoFrame(const AVFrame *frame);
41 |
42 | AVCodecContext *m_decoderCtx;
43 | AVHWDeviceType m_type;
44 | AVBufferRef *m_hwDeviceCtx;
45 |
46 | AVCodec *m_decoder;
47 | };
48 |
// QScopedPointer deleters for FFmpeg objects.
49 | struct AVFrameDeleter
50 | {
51 | static inline void cleanup(void *pointer) {
52 | if (pointer)
53 | av_frame_free((AVFrame**)&pointer);
54 | }
55 | };
56 |
57 | struct AVPacketDeleter
58 | {
59 | static inline void cleanup(void *pointer) {
// NOTE(review): av_packet_unref releases the packet's payload but not a
// heap-allocated AVPacket itself — if the guarded packet comes from
// av_packet_alloc this leaks the struct (av_packet_free would be needed).
// Confirm against the allocation site.
60 | if (pointer)
61 | av_packet_unref((AVPacket*)pointer);
62 | }
63 | };
64 |
65 | struct AVFormatContextDeleter
66 | {
67 | static inline void cleanup(void *pointer) {
68 | if (pointer)
69 | avformat_close_input((AVFormatContext**)&pointer);
70 | }
71 | };
72 |
73 | #endif // HWDECODER_H
74 |
--------------------------------------------------------------------------------
/d3d9interop.h:
--------------------------------------------------------------------------------
1 | #ifndef D3D9INTEROP_H
2 | #define D3D9INTEROP_H
3 |
// NOTE(review): bracketed header name lost in extraction (a Windows/GL
// header such as <d3d9.h> or <qopengl.h> is plausible) — restore.
4 | #include
5 |
6 | //#include //not found in vs2013
7 | //https://www.opengl.org/registry/specs/NV/DX_interop.txt
// Local declarations of the WGL_NV_DX_interop constants and function-pointer
// types, since the SDK header was unavailable (see comment above).
8 | #ifndef WGL_ACCESS_READ_ONLY_NV
9 | #define WGL_ACCESS_READ_ONLY_NV 0x00000000
10 | #define WGL_ACCESS_READ_WRITE_NV 0x00000001
11 | #define WGL_ACCESS_WRITE_DISCARD_NV 0x00000002
12 | typedef BOOL (WINAPI * PFNWGLDXSETRESOURCESHAREHANDLENVPROC) (void *dxObject, HANDLE shareHandle);
13 | typedef HANDLE (WINAPI * PFNWGLDXOPENDEVICENVPROC) (void *dxDevice);
14 | typedef BOOL (WINAPI * PFNWGLDXCLOSEDEVICENVPROC) (HANDLE hDevice);
15 | typedef HANDLE (WINAPI * PFNWGLDXREGISTEROBJECTNVPROC) (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access);
16 | typedef BOOL (WINAPI * PFNWGLDXUNREGISTEROBJECTNVPROC) (HANDLE hDevice, HANDLE hObject);
17 | typedef BOOL (WINAPI * PFNWGLDXOBJECTACCESSNVPROC) (HANDLE hObject, GLenum access);
18 | typedef BOOL (WINAPI * PFNWGLDXLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint count, HANDLE *hObjects);
19 | typedef BOOL (WINAPI * PFNWGLDXUNLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint count, HANDLE *hObjects);
20 | #endif
21 |
// Singleton that resolves the WGL_NV_DX_interop entry points once (on the
// GUI thread — see OpenGLInterop::initialize) and exposes them as public
// function pointers; enabled() reports whether all of them were found.
22 | class D3D9Interop
23 | {
24 | public:
25 | static D3D9Interop* instance();
26 | bool enabled();
27 |
28 | PFNWGLDXOPENDEVICENVPROC wglDXOpenDeviceNV;
29 | PFNWGLDXCLOSEDEVICENVPROC wglDXCloseDeviceNV;
30 |
31 | PFNWGLDXREGISTEROBJECTNVPROC wglDXRegisterObjectNV;
32 | PFNWGLDXUNREGISTEROBJECTNVPROC wglDXUnregisterObjectNV;
33 |
34 | PFNWGLDXLOCKOBJECTSNVPROC wglDXLockObjectsNV;
35 | PFNWGLDXUNLOCKOBJECTSNVPROC wglDXUnlockObjectsNV;
36 |
37 | PFNWGLDXSETRESOURCESHAREHANDLENVPROC wglDXSetResourceShareHandleNV;
38 | PFNWGLDXOBJECTACCESSNVPROC wglDXObjectAccessNV;
39 |
40 | private:
41 | D3D9Interop();
42 | ~D3D9Interop();
43 |
44 | bool _initWGLFunctions();
45 | void _resetWGLFunctions();
46 | bool _checkWGLFunctions();
47 | };
48 |
49 | #endif // D3D9INTEROP_H
50 |
--------------------------------------------------------------------------------
/videorenderer.cpp:
--------------------------------------------------------------------------------
1 | #include "videorenderer.h"
2 | #include "framerenderer.h"
3 | #include "videosource.h"
4 |
5 | #include
6 |
// Render-thread counterpart of VideoFBORenderer. Qt Quick creates it via
// createRenderer() (below) with a current GL context; synchronize() runs
// with the GUI thread blocked, which is why it may safely read the item's
// latest frame.
7 | class VideoRenderer : public QQuickFramebufferObject::Renderer
8 | {
9 | public:
10 | VideoRenderer()
11 | {
// Constructed on the render thread, so the GL context is current here.
12 | m_frameRenderer.initialize();
13 | }
14 |
15 | ~VideoRenderer() {
16 | }
17 |
18 | void render() {
19 | m_frameRenderer.render();
20 | }
21 |
// NOTE(review): createFramebufferObject and render override base-class
// virtuals but are not marked `override` (synchronize below is) — worth
// normalizing when this code is next touched.
22 | QOpenGLFramebufferObject *createFramebufferObject(const QSize &size) {
23 | QOpenGLFramebufferObjectFormat format;
24 | format.setAttachment(QOpenGLFramebufferObject::CombinedDepthStencil);
// 4x multisampling for the video quad.
25 | format.setSamples(4);
26 | return new QOpenGLFramebufferObject(size, format);
27 | }
28 |
29 | private:
30 | FrameRenderer m_frameRenderer;
31 |
32 | // Renderer interface
33 | protected:
// Copy the item's most recent frame to the render-thread FrameRenderer.
34 | virtual void synchronize(QQuickFramebufferObject * renderer) override {
35 | VideoFBORenderer* fboRenderer = (VideoFBORenderer*)renderer;
36 | if (fboRenderer)
37 | m_frameRenderer.setFrame(fboRenderer->frame());
38 | }
39 | };
40 |
41 | VideoFBORenderer::VideoFBORenderer(QQuickItem *parent)
42 | : QQuickFramebufferObject(parent), m_source(nullptr), m_frame(nullptr)
43 | {
44 |
45 | }
46 |
47 | QQuickFramebufferObject::Renderer *VideoFBORenderer::createRenderer() const
48 | {
49 | return new VideoRenderer();
50 | }
51 |
52 | void VideoFBORenderer::setSource(VideoSource *source)
53 | {
54 | if (m_source != source){
55 |
56 | if (m_source)
57 | disconnect(m_source);
58 |
59 | m_source = source;
60 |
61 | if (m_source)
62 | connect(m_source, &VideoSource::frameReady, this, &VideoFBORenderer::onFrameReady);
63 |
64 | Q_EMIT sourceChanged();
65 | }
66 | }
67 |
68 | VideoSource *VideoFBORenderer::source() const
69 | {
70 | return m_source;
71 | }
72 |
73 | void VideoFBORenderer::onFrameReady(VideoFramePtr frame)
74 | {
75 | m_frame = frame;
76 | update();
77 | }
78 |
79 | VideoFramePtr VideoFBORenderer::frame() const
80 | {
81 | return m_frame;
82 | }
83 |
--------------------------------------------------------------------------------
/HWDecoding.pro:
--------------------------------------------------------------------------------
1 | QT += quick concurrent gui
2 | CONFIG += c++11
3 |
4 | # The following define makes your compiler emit warnings if you use
5 | # any feature of Qt which has been marked deprecated (the exact warnings
6 | # depend on your compiler). Please consult the documentation of the
7 | # deprecated API in order to know how to port your code away from it.
8 | DEFINES += QT_DEPRECATED_WARNINGS
9 |
10 | # You can also make your code fail to compile if you use deprecated APIs.
11 | # In order to do so, uncomment the following line.
12 | # You can also select to disable deprecated APIs only up to a certain version of Qt.
13 | #DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0
14 |
15 | SOURCES += main.cpp \
16 | hwdecoder.cpp \
17 | hwdecoderfactory.cpp \
18 | videorenderer.cpp \
19 | framerenderer.cpp \
20 | videosource.cpp \
21 | videoframe.cpp \
22 | surface.cpp \
23 | yuv2rgb.cpp \
24 | openglinterop.cpp \
25 | fileprocessor.cpp
26 |
27 | win32: SOURCES += d3d9decoder.cpp \
28 | d3d9interop.cpp \
29 | surfaced3d9.cpp
30 |
31 | linux-g++: SOURCES += vaapidecoder.cpp \
32 | surfacevaapi.cpp
33 |
34 | RESOURCES += qml.qrc
35 |
36 | # Additional import path used to resolve QML modules in Qt Creator's code model
37 | QML_IMPORT_PATH =
38 |
39 | # Additional import path used to resolve QML modules just for Qt Quick Designer
40 | QML_DESIGNER_IMPORT_PATH =
41 |
42 | # Default rules for deployment.
43 | qnx: target.path = /tmp/$${TARGET}/bin
44 | else: unix:!android: target.path = /opt/$${TARGET}/bin
45 | !isEmpty(target.path): INSTALLS += target
46 |
47 | HEADERS += \
48 | hwdecoder.h \
49 | hwdecoderfactory.h \
50 | videorenderer.h \
51 | framerenderer.h \
52 | videosource.h \
53 | videoframe.h \
54 | surface.h \
55 | yuv2rgb.h \
56 | openglinterop.h \
57 | fileprocessor.h
58 |
59 | win32: HEADERS += d3d9decoder.h \
60 | d3d9interop.h \
61 | surfaced3d9.h
62 |
63 | linux-g++: HEADERS += vaapidecoder.h \
64 | surfacevaapi.h
65 |
66 | #Link with FFmpeg installed in Qt
67 | LIBS += -lavcodec -lavdevice -lavformat -lavutil
68 |
69 | #Link with DX libs (Windows)
70 | win32: LIBS += -ldxgi -ldxva2 -ld3d9
71 |
72 | #Link with libva libs (LINUX)
73 | linux-g++: LIBS += -lX11 -lva -lva-x11
74 |
--------------------------------------------------------------------------------
/d3d9interop.cpp:
--------------------------------------------------------------------------------
1 | #include "d3d9interop.h"
2 | #include
3 | #include
4 |
5 | D3D9Interop* D3D9Interop::instance()
6 | {
7 | static D3D9Interop helper;
8 | return &helper;
9 | }
10 |
11 | bool D3D9Interop::enabled()
12 | {
13 | return _checkWGLFunctions();
14 | }
15 |
16 | D3D9Interop::D3D9Interop()
17 | {
18 | _resetWGLFunctions();
19 | _initWGLFunctions();
20 | }
21 |
22 | D3D9Interop::~D3D9Interop()
23 | {
24 | _resetWGLFunctions();
25 | }
26 |
27 | bool D3D9Interop::_initWGLFunctions()
28 | {
29 | if (_checkWGLFunctions()) return true;
30 |
31 | //Fnadales: This must be executed on gui thread
32 | QSurfaceFormat fmt;
33 | fmt.setMajorVersion(3);
34 | fmt.setMinorVersion(3);
35 | fmt.setProfile(QSurfaceFormat::CoreProfile); //whatever this is
36 | fmt.setRenderableType(QSurfaceFormat::OpenGL);
37 |
38 | QOpenGLContext * context = new QOpenGLContext();
39 | context->setFormat(fmt);
40 | context->create();
41 |
42 | QOffscreenSurface *offscreenSurface = new QOffscreenSurface();
43 | offscreenSurface->setFormat(context->format());
44 | offscreenSurface->create();
45 |
46 | context->makeCurrent(offscreenSurface);
47 |
48 | bool hasInterop = context->hasExtension("WGL_NV_DX_interop");
49 | if (hasInterop) {
50 |
51 | wglDXOpenDeviceNV = (PFNWGLDXOPENDEVICENVPROC)context->getProcAddress("wglDXOpenDeviceNV");
52 | wglDXCloseDeviceNV = (PFNWGLDXCLOSEDEVICENVPROC)context->getProcAddress("wglDXCloseDeviceNV");
53 |
54 | wglDXRegisterObjectNV = (PFNWGLDXREGISTEROBJECTNVPROC)context->getProcAddress("wglDXRegisterObjectNV");
55 | wglDXUnregisterObjectNV = (PFNWGLDXUNREGISTEROBJECTNVPROC)context->getProcAddress("wglDXUnregisterObjectNV");
56 |
57 | wglDXLockObjectsNV = (PFNWGLDXLOCKOBJECTSNVPROC)context->getProcAddress("wglDXLockObjectsNV");
58 | wglDXUnlockObjectsNV = (PFNWGLDXUNLOCKOBJECTSNVPROC)context->getProcAddress("wglDXUnlockObjectsNV");
59 |
60 | wglDXSetResourceShareHandleNV = (PFNWGLDXSETRESOURCESHAREHANDLENVPROC)context->getProcAddress("wglDXSetResourceShareHandleNV");
61 |
62 | wglDXObjectAccessNV = (PFNWGLDXOBJECTACCESSNVPROC)context->getProcAddress("wglDXObjectAccessNV");
63 | }
64 |
65 | delete offscreenSurface;
66 | delete context;
67 |
68 | return hasInterop;
69 | }
70 |
71 | void D3D9Interop::_resetWGLFunctions()
72 | {
73 | wglDXOpenDeviceNV = nullptr;
74 | wglDXCloseDeviceNV = nullptr;
75 |
76 | wglDXRegisterObjectNV = nullptr;
77 | wglDXUnregisterObjectNV = nullptr;
78 |
79 | wglDXLockObjectsNV = nullptr;
80 | wglDXUnlockObjectsNV = nullptr;
81 |
82 | wglDXSetResourceShareHandleNV = nullptr;
83 |
84 | wglDXObjectAccessNV = nullptr;
85 | }
86 |
87 | bool D3D9Interop::_checkWGLFunctions()
88 | {
89 | return (wglDXOpenDeviceNV != nullptr &&
90 | wglDXCloseDeviceNV != nullptr &&
91 | wglDXRegisterObjectNV != nullptr &&
92 | wglDXUnregisterObjectNV != nullptr &&
93 | wglDXLockObjectsNV != nullptr &&
94 | wglDXUnlockObjectsNV != nullptr &&
95 | wglDXSetResourceShareHandleNV != nullptr &&
96 | wglDXObjectAccessNV != nullptr);
97 | }
98 |
--------------------------------------------------------------------------------
/fileprocessor.cpp:
--------------------------------------------------------------------------------
1 | #include "fileprocessor.h"
2 | #include
3 | #include
4 |
// Create the video source and the platform hardware decoder, and wire
// decoded frames straight into the source.
FileProcessor::FileProcessor(QObject *parent)
    : QObject(parent),
      m_decoder(nullptr),
      m_source(nullptr),
      m_processing(false)
{
    m_source = new VideoSource(this);
    m_decoder = HWDecoderFactory::createDecoder(this);
    connect(m_decoder, &HWDecoder::frameDecoded, m_source, &VideoSource::setFrame);
}
15 |
// NOTE(review): only the connections to m_source are dropped here; a
// QtConcurrent worker started by processMedia() may still be running and
// using `this` — consider stopping and waiting for it before destruction.
FileProcessor::~FileProcessor()
{
    disconnect(m_source);
}
20 |
// Accessor for the video source created in the constructor.
VideoSource *FileProcessor::getSource() const
{
    return m_source;
}
25 |
26 | void FileProcessor::setSource(VideoSource *source)
27 | {
28 | if (m_source != source) {
29 | m_source = source;
30 | Q_EMIT sourceChanged();
31 | }
32 | }
33 |
// Worker-thread entry point: marks the processor busy for the whole decode
// of `input`, then clears the flag again when decoding ends or is stopped.
void FileProcessor::processFile(const QString & input)
{
    setProcessing(true);
    decodeFile(input);
    setProcessing(false);
}
40 |
// Demux `input`, feed every packet of the best video stream to the hardware
// decoder, then flush and close the decoder when the loop ends.  Runs on the
// QtConcurrent worker thread started by processMedia().
void FileProcessor::decodeFile(const QString &input)
{
    int video_stream, ret;
    AVPacket packet;

    /* open the input file */
    AVFormatContext * formatCtx = nullptr;
    if (avformat_open_input(&formatCtx, input.toStdString().c_str(), NULL, NULL) != 0) {
        qWarning() << "Cannot open input file" << input;
        return;
    }

    // NOTE(review): the template arguments of this QScopedPointer were lost
    // in this copy of the file; the deleter must call avformat_close_input(),
    // not plain delete — confirm against the header.
    QScopedPointer inputCtx(formatCtx);

    if (avformat_find_stream_info(inputCtx.data(), NULL) < 0) {
        qWarning() << "Cannot find input stream information.";
        return;
    }

    /* find the video stream information */
    ret = av_find_best_stream(inputCtx.data(), AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (ret < 0) {
        qWarning() << "Cannot find a video stream in the input file";
        return;
    }
    video_stream = ret;
    AVCodecParameters* codecParameters = inputCtx->streams[video_stream]->codecpar;

    if (!m_decoder->init(codecParameters)) {
        return;
    }

    if (!m_decoder->open()) {
        return;
    }

    // Decoding loop: stops when stopProcess() clears the busy flag, or when
    // demuxing/decoding returns an error or end-of-file.
    while (processing() && ret >= 0) {
        if ((ret = av_read_frame(inputCtx.data(), &packet)) < 0)
            break;

        if (video_stream == packet.stream_index)
            ret = m_decoder->decode(&packet);

        av_packet_unref(&packet);
    }

    m_decoder->flush();
    m_decoder->close();
}
91 |
92 | bool FileProcessor::processing()
93 | {
94 | QMutexLocker lock(&m_mutex);
95 | return m_processing;
96 | }
97 |
98 | void FileProcessor::setProcessing(bool status)
99 | {
100 | if (processing() != status) {
101 | {
102 | QMutexLocker lock(&m_mutex);
103 | m_processing = status;
104 | }
105 |
106 | Q_EMIT processingChanged();
107 | }
108 | }
109 |
// Public entry point: reject the request while a decode is in flight,
// otherwise run processFile() on a thread-pool thread.
void FileProcessor::processMedia(const QUrl &input)
{
    if (processing()) {
        qWarning() << "File Processor is Busy!!";
        return;
    }

    // Call processFile in another thread (Qt5-style member-function overload
    // of QtConcurrent::run; the returned QFuture is discarded).
    QtConcurrent::run(this, &FileProcessor::processFile, input.toLocalFile());
}
120 |
// Ask the decoding loop in decodeFile() to stop after the current packet.
void FileProcessor::stopProcess()
{
    setProcessing(false);
}
125 |
--------------------------------------------------------------------------------
/framerenderer.cpp:
--------------------------------------------------------------------------------
1 | #include "framerenderer.h"
2 |
// Build the shader program and quad geometry immediately.
// NOTE(review): QOpenGLShader compilation needs a current GL context, so this
// object is presumably constructed on the render thread with the context
// current — confirm at the call site.
FrameRenderer::FrameRenderer()
{
    initShaderProgram();
    initGeometry();
}
8 |
// Release the GL texture created in initialize().
// NOTE(review): assumes the owning GL context is current at destruction time.
FrameRenderer::~FrameRenderer()
{
    glDeleteTextures(1, &m_texture);
}
13 |
14 | void FrameRenderer::initialize()
15 | {
16 | initializeOpenGLFunctions();
17 |
18 | glClearColor(0.f, 0.f, 0.f, 1.0f);
19 |
20 | glGenTextures(1,&m_texture);
21 |
22 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
23 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
24 | }
25 |
// Keep a shared reference to the latest decoded frame; it is consumed by the
// next render() call.
void FrameRenderer::setFrame(VideoFramePtr frame)
{
    m_frame = frame;
}
30 |
31 | void FrameRenderer::initGeometry()
32 | {
33 | m_vertices.clear();
34 | m_vertices.clear();
35 |
36 | m_vertices << QVector3D(-1.0, -1.0, 0);
37 | m_vertices << QVector3D(1.0, -1.0, 0);
38 | m_vertices << QVector3D(-1.0, 1.0, 0);
39 |
40 | m_vertices << QVector3D(-1.0, 1.0, 0);
41 | m_vertices << QVector3D(1.0, -1.0, 0);
42 | m_vertices << QVector3D(1.0, 1.0, 0);
43 |
44 | m_normals << QVector3D(0.0, 0.0, 0);
45 | m_normals << QVector3D(1.0, 0.0, 0);
46 | m_normals << QVector3D(0.0, 1.0, 0);
47 |
48 | m_normals << QVector3D(0.0, 1.0, 0);
49 | m_normals << QVector3D(1.0, 0.0, 0);
50 | m_normals << QVector3D(1.0, 1.0, 0);
51 | }
52 |
// Draw the current video frame as a full-screen textured quad.  Must run
// with the scene-graph GL context current.
void FrameRenderer::render() {

    glDepthMask(GL_TRUE);

    glClearColor(0.f, 0.f, 0.f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Vertices are wound clockwise; cull accordingly.
    glFrontFace(GL_CW);
    glCullFace(GL_FRONT);
    glEnable(GL_CULL_FACE);
    glEnable(GL_DEPTH_TEST);

    // map() binds/fills m_texture from the decoded surface (zero-copy when
    // the platform interop is available).
    if (!m_frame.isNull())
        m_frame->map(m_texture);

    GLuint texture = m_texture;
#if defined(Q_OS_WIN)
    // NOTE(review): sampler uniforms take a texture *unit*, not a texture
    // name; on Windows unit 0 is passed, elsewhere the texture name is —
    // the latter only works when name and bound unit coincide.  Verify.
    texture = 0;
#endif
    // NOTE(review): setUniformValue() is called before bind(); Qt requires
    // the program to be bound when setting uniforms — confirm this relies on
    // the bind from a previous frame.
    m_shaderProgram.setUniformValue("frameTexture", texture);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    m_shaderProgram.bind();

    m_shaderProgram.enableAttributeArray(m_in_tc);
    m_shaderProgram.enableAttributeArray(m_in_pos);
    m_shaderProgram.setAttributeArray(m_in_pos, m_vertices.constData());
    m_shaderProgram.setAttributeArray(m_in_tc, m_normals.constData());

    glDrawArrays(GL_TRIANGLES, 0, m_vertices.size());

    m_shaderProgram.disableAttributeArray(m_in_tc);
    m_shaderProgram.disableAttributeArray(m_in_pos);

    m_shaderProgram.release();

    // Release the surface so the decoder may reuse it.
    if (!m_frame.isNull())
        m_frame->unmap();

    glFlush();

    glDisable(GL_DEPTH_TEST);
    glDisable(GL_CULL_FACE);
}
101 |
// Compile and link the pass-through shader pair: the vertex stage forwards
// position and texture coordinates, the fragment stage samples the frame
// texture.  The shaders are parented to the program, so Qt deletes them
// with it.
void FrameRenderer::initShaderProgram()
{
    QOpenGLShader *vshader = new QOpenGLShader(QOpenGLShader::Vertex, &m_shaderProgram);
    const char *vsrc =
        "varying mediump vec2 interp_tc;\n"
        "attribute mediump vec4 in_pos;\n"
        "attribute mediump vec4 in_tc;\n"
        "\n"
        "void main() {\n"
        " interp_tc = in_tc.xy;\n"
        " gl_Position = in_pos;\n"
        "}\n";
    vshader->compileSourceCode(vsrc);

    QOpenGLShader *fshader = new QOpenGLShader(QOpenGLShader::Fragment, &m_shaderProgram);
    const char *fsrc =
        "uniform sampler2D frameTexture; \n"
        "varying mediump vec2 interp_tc;\n"
        "void main() \n"
        "{ \n"
        " gl_FragColor = texture2D(frameTexture, interp_tc);\n"
        "}\n";
    fshader->compileSourceCode(fsrc);

    m_shaderProgram.addShader(vshader);
    m_shaderProgram.addShader(fshader);
    m_shaderProgram.link();

    // Cache attribute locations used every frame in render().
    m_in_pos = m_shaderProgram.attributeLocation("in_pos");
    m_in_tc = m_shaderProgram.attributeLocation("in_tc");
}
133 |
--------------------------------------------------------------------------------
/surfacevaapi.cpp:
--------------------------------------------------------------------------------
1 | #include "surfacevaapi.h"
2 | #include
3 |
// FBConfig filter used by ensureDisplay(): a single-buffered, pixmap-capable
// config whose pixmaps can be bound as RGB 2D textures
// (GLX_EXT_texture_from_pixmap).
const int pixmap_config[] = {
    GLX_BIND_TO_TEXTURE_RGB_EXT, True,
    GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT,
    GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT,
    GLX_DOUBLEBUFFER, False,
    GLX_Y_INVERTED_EXT, GLX_DONT_CARE,
    None
};

// Attributes for glXCreatePixmap() in ensurePixmap(): bind as an RGB 2D
// texture.
const int pixmap_attribs[] = {
    GLX_TEXTURE_TARGET_EXT, GLX_TEXTURE_2D_EXT,
    GLX_TEXTURE_FORMAT_EXT, GLX_TEXTURE_FORMAT_RGB_EXT,
    None
};
18 |
// Remember the VA-API display/surface pair; all X11/GLX state is created
// lazily in map(), where a current GL context is available.
SurfaceVAAPI::SurfaceVAAPI(VADisplay display, VASurfaceID surface,
                           int width, int height, QObject * parent)
    : Surface(width, height, parent),
      m_glxPixmap(0),
      m_config(0),
      m_vaDisplay(display),
      m_surface(surface),
      m_x11Display(nullptr),
      m_pixmap(0)
{
    resetGLXFunctions();
}
31 |
// Tear down the X/GLX resources created lazily by map().
// NOTE(review): the GLX calls need m_x11Display (and its GL context) to be
// alive at destruction time — confirm destruction order on shutdown.
SurfaceVAAPI::~SurfaceVAAPI()
{
    if (m_pixmap) {
        XFreePixmap(m_x11Display, m_pixmap);
        m_pixmap = 0;
    }

    if (m_glxPixmap) {
        glXReleaseTexImageEXT(m_x11Display, m_glxPixmap, GLX_FRONT_EXT);
        XSync(m_x11Display, False);
        glXDestroyPixmap(m_x11Display, m_glxPixmap);
        m_glxPixmap = 0;
    }

    resetGLXFunctions();
}
48 |
// Render the VA surface into an X pixmap and bind that pixmap to GL texture
// `name` via GLX_EXT_texture_from_pixmap.  Returns false if any lazy
// initialisation step or VA-API call fails.
bool SurfaceVAAPI::map(GLuint name)
{
    glBindTexture(GL_TEXTURE_2D, name);

    if (!ensureDisplay()) return false;

    if (!ensurePixmap()) return false;

    if (!vaDisplayIsValid(m_vaDisplay)) return false;

    // Wait until the decoder has finished writing the surface.
    VAStatus syncResult = vaSyncSurface(m_vaDisplay, m_surface);
    if (syncResult != VA_STATUS_SUCCESS) return false;

    // Copy/convert the decoded surface into the pixmap (BT.709 source).
    VAStatus putResult = vaPutSurface(m_vaDisplay, m_surface, m_pixmap
                                      , 0, 0, m_width, m_height
                                      , 0, 0, m_width, m_height
                                      , NULL, 0, VA_FRAME_PICTURE | VA_SRC_BT709);
    if (putResult != VA_STATUS_SUCCESS) {
        return false;
    }

    // Make sure the X server finished the put before GL samples the pixmap.
    XSync(m_x11Display, False);

    glXBindTexImageEXT(m_x11Display, m_glxPixmap, GLX_FRONT_EXT, NULL);

    return true;
}
76 |
77 | bool SurfaceVAAPI::unmap()
78 | {
79 | glXReleaseTexImageEXT(m_x11Display, m_glxPixmap, GLX_FRONT_EXT);
80 | glBindTexture(GL_TEXTURE_2D, 0);
81 |
82 | return true;
83 | }
84 |
85 | bool SurfaceVAAPI::ensureDisplay()
86 | {
87 | if (m_x11Display && m_config) return true;
88 |
89 | m_x11Display = glXGetCurrentDisplay();
90 | if (!m_x11Display) return false;
91 |
92 | if (!initGLXFunctions()) return false;
93 |
94 | int cn=0;
95 | int xscreen = DefaultScreen(m_x11Display);
96 | GLXFBConfig *configs = glXChooseFBConfig(m_x11Display, xscreen, pixmap_config, &cn);
97 | if(cn) {
98 | m_config = configs[0];
99 | XFree(configs);
100 | return true;
101 | } else
102 | return false;
103 | }
104 |
// Lazily create the X pixmap (sized to the frame, depth taken from the root
// window) and the GLX pixmap wrapping it.  Both are cached and reused by
// every subsequent map().
bool SurfaceVAAPI::ensurePixmap()
{
    if (m_glxPixmap) return true;

    // A bare X pixmap without its GLX wrapper is a leftover from a failed
    // attempt; recreate it from scratch.
    if (m_pixmap) {
        XFreePixmap(m_x11Display, m_pixmap);
        m_pixmap = 0;
    }

    XWindowAttributes xwa;
    XGetWindowAttributes(m_x11Display, DefaultRootWindow(m_x11Display), &xwa);
    m_pixmap = XCreatePixmap(m_x11Display, DefaultRootWindow(m_x11Display), m_width, m_height, xwa.depth);

    if (!m_pixmap) return false;

    m_glxPixmap = glXCreatePixmap(m_x11Display, m_config, m_pixmap, pixmap_attribs);
    if (!m_glxPixmap)
        return false;

    return true;
}
126 |
127 | bool SurfaceVAAPI::initGLXFunctions()
128 | {
129 | if (checkGLXFunctions()) return true;
130 |
131 | if (!m_x11Display) return false;
132 |
133 | int xscreen = DefaultScreen(m_x11Display);
134 | const char * exts = glXQueryExtensionsString(m_x11Display, xscreen);
135 | if(! strstr(exts, "GLX_EXT_texture_from_pixmap"))
136 | {
137 | fprintf(stderr, "GLX_EXT_texture_from_pixmap not supported!\n");
138 | return false;
139 | }
140 |
141 | glXBindTexImageEXT = (t_glx_bind) glXGetProcAddress((const GLubyte *)"glXBindTexImageEXT");
142 | glXReleaseTexImageEXT = (t_glx_release) glXGetProcAddress((const GLubyte *)"glXReleaseTexImageEXT");
143 |
144 | return true;
145 | }
146 |
// Forget the resolved GLX entry points (called from the constructor and the
// destructor) so a stale pointer is never used with a new display.
void SurfaceVAAPI::resetGLXFunctions()
{
    glXBindTexImageEXT = nullptr;
    glXReleaseTexImageEXT = nullptr;
}
152 |
153 | bool SurfaceVAAPI::checkGLXFunctions()
154 | {
155 | return glXBindTexImageEXT != nullptr
156 | && glXReleaseTexImageEXT != nullptr;
157 | }
158 |
--------------------------------------------------------------------------------
/yuv2rgb.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2012 Andre Chen and contributors.
3 | * andre.hl.chen@gmail.com
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | *
17 | */
18 | #ifndef YUV_TO_RGB
19 | #define YUV_TO_RGB
20 |
21 | /*
22 | YUV 4:2:0 (sp for "semi-plane")image with a plane of 8 bit Y samples followed by an interleaved
23 | U/V plane containing 8 bit 2x2 subsampled chroma samples.
24 | except the interleave order of U and V is reversed.
25 |
26 | Cumbersome YUV formats(http://www.fourcc.org/yuv.php)...
27 |
28 | NV12
29 | YUV 4:2:0 image with a plane of 8 bit Y samples followed by an interleaved U/V plane containing 8 bit 2x2 subsampled colour difference samples.
30 | Microsoft defines this format as follows:
31 | "A format in which all Y samples are found first in memory as an array of unsigned char with an even number of lines
32 | (possibly with a larger stride for memory alignment), followed immediately by an array of unsigned char containing interleaved Cb and Cr
33 | samples (such that if addressed as a little-endian WORD type, Cb(U) would be in the LSBs and Cr(V) would be in the MSBs) with the same total
34 | stride as the Y samples. This is the preferred 4:2:0 pixel format"
35 | e.g. YYYYYYYY YYYYYYYY YYYYYYYY YYYYYYYY UVUVUVUV UVUVUVUV
36 |
37 | NV21(aka YCrCb format. the default format for camera preview images)
38 | YUV 4:2:0 image with a plane of 8 bit Y samples followed by an interleaved V/U plane containing 8 bit 2x2 subsampled chroma samples.
39 | The same as NV12 except the interleave order of U and V is reversed.
40 | e.g. YYYYYYYY YYYYYYYY YYYYYYYY YYYYYYYY VUVUVUVU VUVUVUVU
41 |
42 |
43 | To convert Y'UV to RGB :
44 | matrix from:
45 | |R| | 298 0 409 | | Y'- 16 |
46 | |G| = | 298 -100 -208 | | U - 128 |
47 | |B| | 298 516 0 | | V - 128 |
48 | then shift 8 bits, i.e.
49 |
50 | in integer math:
51 | R = clamp((298*(Y'-16)+409*(V-128)+128)>>8)
52 | G = clamp((298*(Y'-16)-100*(U-128)-208*(V-128)+128)>>8)
53 | B = clamp((298*(Y'-16)+516*(U-128)+128)>>8)
54 |
55 | to encode RGB to Y'UV..
56 | Y' = (( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16
57 | U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128
58 | V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128
59 | */
60 |
61 | //
62 | // [in]
63 | // alpha : alpha value if rgba
64 | // yuv : nv21 image(size=width*height*3/2)
65 | // width : must be even
// height : must be even
67 | // [out]
68 | // rgb : rgb buffer(size>=width*height*3) byte order : R0 G0 B0 R1 G1 B1 R2 G2 B2
69 | // rgba : rgba buffer(size>=width*height*4) byte order : R0 G0 B0 A0 R1 G1 B1 A1 R2 G2 B2 A2
70 | bool nv21_to_rgb(unsigned char* rgb, unsigned char const* nv21, int width, int height);
71 | bool nv21_to_rgba(unsigned char* rgba, unsigned char alpha, unsigned char const* nv21, int width, int height);
72 |
73 | bool nv12_to_rgb(unsigned char* rgb, unsigned char const* nv12, int width, int height);
74 | bool nv12_to_rgba(unsigned char* rgba, unsigned char alpha, unsigned char const* nv21, int width, int height);
75 |
76 | // OpenCV style
77 | bool nv21_to_bgr(unsigned char* bgr, unsigned char const* nv21, int width, int height);
78 | bool nv21_to_bgra(unsigned char* bgra, unsigned char alpha, unsigned char const* nv21, int width, int height);
79 |
80 | bool nv12_to_bgr(unsigned char* bgr, unsigned char const* nv21, int width, int height);
81 |
82 |
83 | //
84 | //
// to make the build in Android (activate NEON), either...
86 | // method 1)
87 | // ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
88 | // LOCAL_CFLAGS += -DARM_NEON_ENABLE -mfpu=neon -flax-vector-conversions
89 | // endif
90 | // LOCAL_SRC_FILES += yuv2rgb.cpp
91 | //
92 | // (note ARM_NEON_ENABLE is defined, in case you'd like to exclude neon stuff from the build)
93 | //
// method 2) (use funny suffix as the NDK suggested)
95 | // ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
96 | // LOCAL_CFLAGS += -DARM_NEON_ENABLE -flax-vector-conversions
97 | // LOCAL_SRC_FILES += yuv2rgb.cpp.neon
98 | // else
99 | // LOCAL_SRC_FILES += yuv2rgb.cpp
100 | // endif
101 | //
102 | // this compiles on GCC(android), Xcode(iOS).
103 | //
104 |
105 | #endif
106 |
--------------------------------------------------------------------------------
/hwdecoder.cpp:
--------------------------------------------------------------------------------
1 | #include "hwdecoder.h"
2 | #include
3 |
4 | AVPixelFormat HWDecoder::m_hwPixFmt = AV_PIX_FMT_NONE;
5 |
// Initialise all members to "no device / no codec" and register FFmpeg's
// muxers/demuxers.
// NOTE(review): av_register_all() is deprecated (a no-op in FFmpeg 4+);
// keep only if the project still targets older libavformat — confirm.
HWDecoder::HWDecoder(QObject * parent)
    : QObject(parent),
      m_type(AV_HWDEVICE_TYPE_NONE),
      m_hwDeviceCtx(nullptr),
      m_decoder(nullptr),
      m_decoderCtx(nullptr)
{
    av_register_all();
}
15 |
// Drain any buffered frames, then free the codec and device contexts.
HWDecoder::~HWDecoder()
{
    flush();
    close();
}
21 |
// Prepare the decoder for a stream: find a codec for the stream's codec id,
// resolve the HW device type named by m_deviceName (presumably set by the
// platform subclass — confirm), copy the stream parameters into a fresh
// codec context and create the HW device context.  Returns false on any
// failure; partially-allocated contexts are released later by close().
bool HWDecoder::init(AVCodecParameters* codecParameters)
{
    if (!codecParameters) return false;

    m_decoder = avcodec_find_decoder(codecParameters->codec_id);
    if (!m_decoder) return false;

    m_type = av_hwdevice_find_type_by_name(m_deviceName.toStdString().c_str());
    if (m_type == AV_HWDEVICE_TYPE_NONE) {
        // Help the user by listing everything this FFmpeg build supports.
        qWarning() << "Device type" << m_deviceName << "is not supported.";
        qWarning() << "Available device types:";
        while((m_type = av_hwdevice_iterate_types(m_type)) != AV_HWDEVICE_TYPE_NONE)
            qWarning() << QString::fromStdString(av_hwdevice_get_type_name(m_type));
        return false;
    }

    if (!(m_decoderCtx = avcodec_alloc_context3(m_decoder))) {
        return false;
    }

    if (avcodec_parameters_to_context(m_decoderCtx, codecParameters) < 0)
        return false;

    // getFormat() selects the HW pixel format during avcodec_open2().
    m_decoderCtx->get_format = getFormat;
    // NOTE(review): refcounted_frames is deprecated in newer FFmpeg releases;
    // confirm against the libavcodec version the project pins.
    m_decoderCtx->refcounted_frames = 1;

    if (initHWContext(m_type) < 0)
        return false;

    return true;
}
53 |
54 | int HWDecoder::initHWContext(const enum AVHWDeviceType type)
55 | {
56 | int err = 0;
57 |
58 | if ((err = av_hwdevice_ctx_create(&m_hwDeviceCtx, type,
59 | NULL, NULL, 0)) < 0) {
60 | qWarning() << "Failed to create specified HW device.";
61 | return err;
62 | }
63 | m_decoderCtx->hw_device_ctx = av_buffer_ref(m_hwDeviceCtx);
64 |
65 | return err;
66 | }
67 |
68 | bool HWDecoder::open()
69 | {
70 | if (m_decoder && m_decoderCtx
71 | && avcodec_open2(m_decoderCtx, m_decoder, NULL) == 0) {
72 | return true;
73 | }
74 |
75 | qWarning() << "Failed to open codec";
76 | return false;
77 | }
78 |
79 |
80 | void HWDecoder::close()
81 | {
82 | avcodec_free_context(&m_decoderCtx);
83 | av_buffer_unref(&m_hwDeviceCtx);
84 |
85 | m_decoderCtx = nullptr;
86 | m_hwDeviceCtx = nullptr;
87 | }
88 |
// Drain the decoder by sending an empty packet (FFmpeg flush semantics).
void HWDecoder::flush()
{
    if (m_decoderCtx) {
        // NOTE(review): this QScopedPointer's template arguments were lost in
        // this copy of the file; the deleter must call av_packet_free(), not
        // plain delete — confirm against the header.
        QScopedPointer packet(av_packet_alloc());
        decode(packet.data());
    }
}
96 |
97 | enum AVPixelFormat HWDecoder::getFormat(AVCodecContext *ctx,
98 | const enum AVPixelFormat *pix_fmts)
99 | {
100 | Q_UNUSED(ctx)
101 | const enum AVPixelFormat *p;
102 |
103 | //nadaless: Try to decode baseline profiles with HW (Android WebRTC Streams)
104 | if (ctx->profile == FF_PROFILE_H264_BASELINE)
105 | ctx->hwaccel_flags |= AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH;
106 |
107 | for (p = pix_fmts; *p != -1; p++) {
108 | if (*p == m_hwPixFmt)
109 | return *p;
110 | }
111 |
112 | qWarning() << "Failed to get HW surface format.";
113 | return AV_PIX_FMT_NONE;
114 | }
115 |
// Send one packet (an empty packet flushes) and emit every frame the decoder
// returns.  Returns 0 when the decoder wants more input, a negative AVERROR
// on failure or end-of-stream.
int HWDecoder::decode(AVPacket *packet)
{

    int ret = avcodec_send_packet(m_decoderCtx, packet);
    if (ret < 0) {
        qWarning() << "Error during decoding";
        return ret;
    }

    // NOTE(review): this QScopedPointer's template arguments were lost in
    // this copy of the file; the deleter must call av_frame_free(), not
    // plain delete — confirm against the header.
    QScopedPointer frame;
    while (ret >= 0) {
        frame.reset(av_frame_alloc());

        if (!frame.data()) {
            qWarning() << "Can not alloc frame to get decoded data!";
            return AVERROR(ENOMEM);
        }

        ret = avcodec_receive_frame(m_decoderCtx, frame.data());
        if (ret == AVERROR(EAGAIN)) {
            // Decoder needs more input: not an error.
            return 0;
        } else if (ret < 0) {
            switch(ret) {
            case AVERROR_EOF:
                // Signal end-of-stream downstream with an empty frame.
                sendFrame(new VideoFrame());
                break;
            default:
                qWarning() << "Error while decoding, code:" << ret;
                break;
            }
            return ret;
        }

        // Frames in the HW format wrap the device surface; anything else
        // takes the software path (currently unimplemented, see
        // createSWVideoFrame).
        VideoFrame* videoFrame;
        if (frame->format == m_hwPixFmt) {
            videoFrame = createHWVideoFrame(frame.data());
        } else {
            videoFrame = createSWVideoFrame(frame.data());
        }

        sendFrame(videoFrame);
    }

    return 0;
}
161 |
// Fallback for frames decoded in software: not implemented yet; returns an
// empty frame so the pipeline keeps running.
VideoFrame* HWDecoder::createSWVideoFrame(const AVFrame *frame)
{
    Q_UNUSED(frame)
    Q_UNIMPLEMENTED();
    return new VideoFrame();
}
168 |
// Take ownership of the raw frame in a shared pointer and notify listeners.
void HWDecoder::sendFrame(VideoFrame *frame)
{
    VideoFramePtr sharedFrame(frame);
    Q_EMIT frameDecoded(sharedFrame);
}
174 |
--------------------------------------------------------------------------------
/surfaced3d9.cpp:
--------------------------------------------------------------------------------
1 | #include "surfaced3d9.h"
2 | #include "d3d9interop.h"
3 | #include
4 |
5 | extern "C" {
6 | #include "libavutil/imgutils.h"
7 | }
8 |
9 | #include "yuv2rgb.h"
10 |
11 | #define NV12_FORMAT 0x3231564e // 2 1 V N
12 |
13 | SurfaceD3D9::SurfaceD3D9(IDirect3DSurface9 * surface, int width, int height)
14 | : Surface(width, height)
15 | , m_device(nullptr)
16 | , m_texture(nullptr)
17 | , m_surface(nullptr)
18 | , m_origSurface(nullptr)
19 | , m_shareHandle(nullptr)
20 | , gl_handleD3D(nullptr)
21 | , gl_handle(nullptr)
22 | {
23 | if (surface == nullptr) {
24 | qWarning() << Q_FUNC_INFO << "Null Surface!!!";
25 | return;
26 | }
27 |
28 | m_origSurface = surface;
29 | if (FAILED(m_origSurface->GetDevice(&m_device))) {
30 | qWarning() << Q_FUNC_INFO << "Error getting the Device from the Surface";
31 | return;
32 | }
33 |
34 | m_origSurface->GetDesc(&m_surfaceDesc);
35 |
36 | if (D3D9Interop::instance()->enabled()) {
37 | //Copy Original Surface for Zero-Copy Rendering
38 | m_device->CreateTexture(m_surfaceDesc.Width, m_surfaceDesc.Height, 1,
39 | D3DUSAGE_RENDERTARGET,
40 | D3DFMT_X8R8G8B8,
41 | m_surfaceDesc.Pool,
42 | &m_texture,
43 | &m_shareHandle);
44 |
45 | m_texture->GetSurfaceLevel(0, &m_surface);
46 | m_device->StretchRect(surface, NULL, m_surface, NULL, D3DTEXF_NONE);
47 | } else {
48 | extractSurfaceData();
49 | }
50 | }
51 |
// Release the render-target copy; the original surface belongs to the
// decoder and is only forgotten here.
// NOTE(review): IDirect3DSurface9::GetDevice() AddRefs the device, so a
// matching m_device->Release() may be missing here — confirm against the
// D3D9 reference counting rules.
SurfaceD3D9::~SurfaceD3D9() {

    if (m_surface)
        m_surface->Release();
    m_surface = nullptr;

    if (m_texture)
        m_texture->Release();
    m_texture = nullptr;

    m_device = nullptr;
    gl_handleD3D = nullptr;
    gl_handle = nullptr;
}
66 |
// Bind the copied D3D9 surface to GL texture `name` through
// WGL_NV_DX_interop (falls back to the base-class software path when the
// interop is unavailable).  Registration is redone on every map() and undone
// in unmap().
bool SurfaceD3D9::map(GLuint name)
{
    if (!D3D9Interop::instance()->enabled())
        return Surface::map(name);

    initGLFunctions();

    if (m_surface == nullptr) return false;

    // required by d3d9 not d3d10&11: https://www.opengl.org/registry/specs/NV/DX_interop2.txt
    D3D9Interop::instance()->wglDXSetResourceShareHandleNV(m_surface, m_shareHandle);

    // register the Direct3D device with GL
    gl_handleD3D = D3D9Interop::instance()->wglDXOpenDeviceNV(m_device);
    if (gl_handleD3D == NULL) {
        qWarning() << Q_FUNC_INFO << "wglDXOpenDeviceNV" << GetLastError();
        return false;
    }

    // Expose the D3D surface as the GL texture `name` (read-only).
    gl_handle = D3D9Interop::instance()->wglDXRegisterObjectNV(gl_handleD3D,
                                                               m_surface,
                                                               name,
                                                               GL_TEXTURE_2D,
                                                               WGL_ACCESS_READ_ONLY_NV);
    if (gl_handle == NULL) {
        qWarning() << Q_FUNC_INFO << "wglDXRegisterObjectNV" << GetLastError();
        return false;
    }

    // Lock the object for GL access for the duration of the draw.
    bool lock = D3D9Interop::instance()->wglDXLockObjectsNV(gl_handleD3D, 1, &gl_handle);
    bool objectAccess = D3D9Interop::instance()->wglDXObjectAccessNV(gl_handle, WGL_ACCESS_READ_ONLY_NV);

    m_glFunctions.glBindTexture(GL_TEXTURE_2D, name);

    return lock && objectAccess;
}
103 |
104 | bool SurfaceD3D9::unmap()
105 | {
106 | m_glFunctions.glBindTexture(GL_TEXTURE_2D, 0);
107 |
108 | if (!D3D9Interop::instance()->enabled()) return false;
109 |
110 | bool unlock = D3D9Interop::instance()->wglDXUnlockObjectsNV(gl_handleD3D, 1, &gl_handle);
111 | bool unregister = D3D9Interop::instance()->wglDXUnregisterObjectNV(gl_handleD3D, gl_handle);
112 | bool closeDevice = D3D9Interop::instance()->wglDXCloseDeviceNV(gl_handleD3D);
113 |
114 | gl_handleD3D = NULL;
115 | gl_handle = NULL;
116 |
117 | return unlock && unregister && closeDevice;
118 | }
119 |
// CPU fallback: lock the decoder's NV12 surface, strip the driver pitch
// (cropImage) and convert to a packed RGB24 buffer in m_rgbData —
// presumably consumed by the base-class Surface::map() — confirm.
void SurfaceD3D9::extractSurfaceData()
{
    if (!m_rgbData.isEmpty()) return;   // already converted once

    if (m_surfaceDesc.Format != NV12_FORMAT)
    {
        qWarning() << Q_FUNC_INFO << "Wrong format, expected NV12";
        return;
    }

    if (m_origSurface)
    {
        D3DLOCKED_RECT lockedRect;
        ZeroMemory(&lockedRect, sizeof(D3DLOCKED_RECT));
        HRESULT hr = m_origSurface->LockRect(&lockedRect, NULL, D3DLOCK_READONLY);
        if (SUCCEEDED(hr))
        {
            //Resize RGB Data Buffer
            size_t size = av_image_get_buffer_size(AV_PIX_FMT_RGB24, m_width, m_height, 1);
            m_rgbData.resize(size);

            //Convert NV12 Buffer to RGB stored Buffer
            // NOTE(review): the surface is NV12 but the converter used is
            // nv21_to_bgr (U/V order swapped relative to NV12) — confirm the
            // swap is intentional for the desired channel order.
            nv21_to_bgr((unsigned char*)m_rgbData.data(),
                        (unsigned char*)cropImage(lockedRect).data(),
                        m_width, m_height);

            m_origSurface->UnlockRect();
        }
    }
}
150 |
// Copy the locked NV12 surface into a tightly-packed buffer, removing the
// driver's row pitch and any alignment rows: m_height rows of Y followed by
// m_height/2 rows of interleaved UV.
QByteArray SurfaceD3D9::cropImage(D3DLOCKED_RECT & lockedRect)
{
    size_t dstSize = av_image_get_buffer_size(AV_PIX_FMT_NV12, m_width, m_height, 1);
    QByteArray dstData(dstSize, 0x00);
    char * dstYData = dstData.data();
    char * dstUVData = dstYData + (m_width * m_height);

    // In the source, the UV plane starts after Height (surface, possibly
    // padded) rows of pitch-strided Y data.
    char * srcYData = (char*)lockedRect.pBits;
    char * srcUVData = srcYData + (lockedRect.Pitch * m_surfaceDesc.Height);

    int chromaHeight = m_height >> 1;
    for (int i = 0; i < m_height; i++) {
        memcpy(dstYData, srcYData, m_width);
        srcYData += lockedRect.Pitch;
        dstYData += m_width;

        // Chroma is 2x2 subsampled: only half as many rows to copy.
        if (i < chromaHeight) {
            memcpy(dstUVData, srcUVData, m_width);
            srcUVData += lockedRect.Pitch;
            dstUVData += m_width;
        }
    }

    return dstData;
}
176 |
--------------------------------------------------------------------------------
/yuv2rgb.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2012 Andre Chen and contributors.
3 | * andre.hl.chen@gmail.com
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License");
6 | * you may not use this file except in compliance with the License.
7 | * You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | *
17 | */
18 | #include "yuv2rgb.h"
19 |
20 | //-----------------------------------------------------------------------------
21 | #ifdef ARM_NEON_ENABLE
22 | #include
23 | template
24 | bool decode_yuv_neon(unsigned char* out, unsigned char const* y, unsigned char const* uv, int width, int height, unsigned char fill_alpha=0xff)
25 | {
26 | // pre-condition : width, height must be even
27 | if (0!=(width&1) || width<2 || 0!=(height&1) || height<2 || !out || !y || !uv)
28 | return false;
29 |
30 | // in & out pointers
31 | unsigned char* dst = out;
32 |
33 | // constants
34 | int const stride = width*trait::bytes_per_pixel;
35 | int const itHeight = height>>1;
36 | int const itWidth = width>>3;
37 |
38 | uint8x8_t const Yshift = vdup_n_u8(16);
39 | int16x8_t const half = vdupq_n_u16(128);
40 | int32x4_t const rounding = vdupq_n_s32(128);
41 |
42 | // tmp variable
43 | uint16x8_t t;
44 |
45 | // pixel block to temporary store 8 pixels
46 | typename trait::PixelBlock pblock = trait::init_pixelblock(fill_alpha);
47 |
48 | for (int j=0; j(rgb, nv21, nv21+(width*height), width, height);
113 | }
114 |
// NEON overload taking separate Y and interleaved VU plane pointers.
// NOTE(review): the template argument list of decode_yuv_neon (e.g.
// decode_yuv_neon<NV21toRGB_neon>) appears to have been lost in this copy of
// the file — restore it from upstream.
bool nv21_to_rgb(unsigned char* rgb, unsigned char const* y, unsigned char const* uv, int width, int height) {
    return decode_yuv_neon(rgb, y, uv, width, height);
}
118 |
119 | //------------------------------------------------------------------------------
// Pixel traits for the NEON decoder: NV21 -> packed 32-bit RGBA with a
// constant alpha.
class NV21toRGBA_neon {
public:
    enum { bytes_per_pixel = 4 };
    typedef uint8x8x4_t PixelBlock;
    // Pre-fill the 8-pixel block's alpha lane once; the colour lanes are
    // overwritten per block in store_pixel_block().
    static PixelBlock const init_pixelblock(unsigned char fill_alpha) {
        PixelBlock block;
        block.val[3] = vdup_n_u8(fill_alpha); // alpha channel in the last
        return block;
    }
    // Load 8 interleaved chroma bytes unchanged.
    static uint8x8_t const loadvu(unsigned char const* uv) {
        return vld1_u8(uv);
    }
    // Interleave the R,G,B lanes with the pre-set alpha and store 8 pixels.
    static void store_pixel_block(unsigned char* dst, PixelBlock& pblock, uint8x8_t const& r, uint8x8_t const& g, uint8x8_t const& b) {
        pblock.val[0] = r;
        pblock.val[1] = g;
        pblock.val[2] = b;
        vst4_u8(dst, pblock);
    }
};
// NOTE(review): the template argument list (decode_yuv_neon<NV21toRGBA_neon>)
// appears to have been lost in this copy of the file — restore it.
bool nv21_to_rgba(unsigned char* rgba, unsigned char alpha, unsigned char const* nv21, int width, int height) {
    return decode_yuv_neon(rgba, nv21, nv21+(width*height), width, height, alpha);
}
142 |
143 | //------------------------------------------------------------------------------
// Pixel traits for the NEON decoder: NV21 -> packed 32-bit BGRA (same as the
// RGBA trait with the R and B lanes swapped at store time).
class NV21toBGRA_neon {
public:
    enum { bytes_per_pixel = 4 };
    typedef uint8x8x4_t PixelBlock;
    // Pre-fill the 8-pixel block's alpha lane once.
    static PixelBlock const init_pixelblock(unsigned char fill_alpha) {
        PixelBlock block;
        block.val[3] = vdup_n_u8(fill_alpha); // alpha channel in the last
        return block;
    }
    // Load 8 interleaved chroma bytes unchanged.
    static uint8x8_t const loadvu(unsigned char const* uv) {
        return vld1_u8(uv);
    }
    // Store 8 pixels in B,G,R,A byte order.
    static void store_pixel_block(unsigned char* dst, PixelBlock& pblock, uint8x8_t const& r, uint8x8_t const& g, uint8x8_t const& b) {
        pblock.val[0] = b;
        pblock.val[1] = g;
        pblock.val[2] = r;
        vst4_u8(dst, pblock);
    }
};
// NOTE(review): the template argument list (decode_yuv_neon<NV21toBGRA_neon>)
// appears to have been lost in this copy of the file — restore it.
bool nv21_to_bgra(unsigned char* rgba, unsigned char alpha, unsigned char const* nv21, int width, int height) {
    return decode_yuv_neon(rgba, nv21, nv21+(width*height), width, height, alpha);
}
166 |
167 | //------------------------------------------------------------------------------
// Pixel traits for the NEON decoder: NV21 -> packed 24-bit BGR (no alpha).
class NV21toBGR_neon {
public:
    enum { bytes_per_pixel = 3 };
    typedef uint8x8x3_t PixelBlock;
    // No alpha lane to pre-fill for a 3-channel block.
    static PixelBlock const init_pixelblock(unsigned char /*fill_alpha*/) {
        return uint8x8x3_t();
    }
    // Load 8 interleaved chroma bytes unchanged.
    static uint8x8_t const loadvu(unsigned char const* uv) {
        return vld1_u8(uv);
    }
    // Store 8 pixels in B,G,R byte order.
    static void store_pixel_block(unsigned char* dst, PixelBlock& pblock, uint8x8_t const& r, uint8x8_t const& g, uint8x8_t const& b) {
        pblock.val[0] = b;
        pblock.val[1] = g;
        pblock.val[2] = r;
        vst3_u8(dst, pblock);
    }
};
// NOTE(review): the template argument list (decode_yuv_neon<NV21toBGR_neon>)
// appears to have been lost in this copy of the file — restore it.
bool nv21_to_bgr(unsigned char* bgr, unsigned char const* nv21, int width, int height) {
    return decode_yuv_neon(bgr, nv21, nv21+(width*height), width, height);
}
188 |
189 | #else // !neon
190 |
191 | #ifdef __ANDROID__
192 | #warning "use SLOW YUV(nv21) decoder : Try #define ARM_NEON_ENABLE if target architecture is ARMv7a"
193 | #endif
194 |
195 | //------------------------------------------------------------------------------
template <class trait> // trait = pixel-format policy (NV21toRGB, NV12toBGRA, ...)
// Scalar NV12/NV21 -> packed RGB(A)/BGR(A) conversion.
// BT.601 "studio swing" coefficients in 8.8 fixed point (298/409/208/100/516);
// the +128 terms round before the final >>8 done in trait::store_pixel.
// Returns false on odd or sub-2x2 dimensions or null pointers.
// NOTE(review): the template header and the loop interior below were restored
// after extraction garbling, from the surviving declarations in this block.
bool decode_yuv(unsigned char* out, unsigned char const* yuv, int width, int height, unsigned char alpha=0xff)
{
    // pre-condition : width and height must be even
    if (0!=(width&1) || width<2 || 0!=(height&1) || height<2 || !out || !yuv)
        return false;

    unsigned char* dst0 = out;

    unsigned char const* y0 = yuv;                   // current even scanline of Y plane
    unsigned char const* uv = yuv + (width*height);  // interleaved chroma plane follows Y
    int const halfHeight = height>>1;
    int const halfWidth = width>>1;

    int Y00, Y01, Y10, Y11;
    int V, U;
    int tR, tG, tB;
    // Process 2x2 luma blocks; each block shares a single (U,V) chroma sample.
    for (int h=0; h<halfHeight; ++h) {
        unsigned char const* y1 = y0 + width;                       // odd scanline
        unsigned char* dst1 = dst0 + width*trait::bytes_per_pixel;  // its output row
        for (int w=0; w<halfWidth; ++w) {
            trait::loadvu(U, V, uv);  // trait fixes the U/V byte order (NV12 vs NV21)

            Y00 = (*y0++) - 16;
            Y01 = (*y0++) - 16;
            Y10 = (*y1++) - 16;
            Y11 = (*y1++) - 16;

            // Clamp luma at 0, then scale by 298 (1.164 in 8.8 fixed point).
            Y00 = (Y00>0) ? (298*Y00):0;
            Y01 = (Y01>0) ? (298*Y01):0;
            Y10 = (Y10>0) ? (298*Y10):0;
            Y11 = (Y11>0) ? (298*Y11):0;
            tR = 128 + 409*V;
            tG = 128 - 100*U - 208*V;
            tB = 128 + 516*U;

            // 2x2 pixels result
            trait::store_pixel(dst0, Y00+tR, Y00+tG, Y00+tB, alpha);
            trait::store_pixel(dst0, Y01+tR, Y01+tG, Y01+tB, alpha);
            trait::store_pixel(dst1, Y10+tR, Y10+tG, Y10+tB, alpha);
            trait::store_pixel(dst1, Y11+tR, Y11+tG, Y11+tB, alpha);
        }
        y0 = y1;      // we consumed two scanlines per pass
        dst0 = dst1;
    }
    return true;
}
244 |
245 | //------------------------------------------------------------------------------
// Pixel-format policy for decode_yuv: NV21 chroma order (V before U),
// output packed 24-bit R,G,B.
class NV21toRGB {
public:
    enum { bytes_per_pixel = 3 };   // bytes written per pixel

    // Read one V,U pair from the interleaved chroma plane, removing the 128 bias.
    static void loadvu(int& U, int& V, unsigned char const* &uv) {
        V = int(*uv++) - 128;
        U = int(*uv++) - 128;
    }

    // Write one R,G,B pixel from 8.8 fixed-point channel values; advances dst.
    static void store_pixel(unsigned char* &dst, int iR, int iG, int iB, unsigned char/*alpha*/) {
        dst[0] = saturate(iR);
        dst[1] = saturate(iG);
        dst[2] = saturate(iB);
        dst += bytes_per_pixel;
    }

private:
    // Clamp an 8.8 fixed-point channel into [0,255].
    static unsigned char saturate(int v) {
        if (v <= 0)     return 0;
        if (v >= 65535) return 0xff;
        return (unsigned char)(v >> 8);
    }
};
259 | bool nv21_to_rgb(unsigned char* rgb, unsigned char const* nv21, int width, int height) {
260 | return decode_yuv(rgb, nv21, width, height);
261 | }
262 |
263 | //------------------------------------------------------------------------------
// Pixel-format policy for decode_yuv: NV12 chroma order (U before V),
// output packed 24-bit R,G,B.
class NV12toRGB {
public:
    enum { bytes_per_pixel = 3 };   // bytes written per pixel

    // Read one U,V pair from the interleaved chroma plane, removing the 128 bias.
    static void loadvu(int& U, int& V, unsigned char const* &uv) {
        U = int(*uv++) - 128;
        V = int(*uv++) - 128;
    }

    // Write one R,G,B pixel from 8.8 fixed-point channel values; advances dst.
    static void store_pixel(unsigned char* &dst, int iR, int iG, int iB, unsigned char/*alpha*/) {
        dst[0] = saturate(iR);
        dst[1] = saturate(iG);
        dst[2] = saturate(iB);
        dst += bytes_per_pixel;
    }

private:
    // Clamp an 8.8 fixed-point channel into [0,255].
    static unsigned char saturate(int v) {
        if (v <= 0)     return 0;
        if (v >= 65535) return 0xff;
        return (unsigned char)(v >> 8);
    }
};
277 | bool nv12_to_rgb(unsigned char* rgb, unsigned char const* nv12, int width, int height) {
278 | return decode_yuv(rgb, nv12, width, height);
279 | }
280 | //------------------------------------------------------------------------------
// Pixel-format policy for decode_yuv: NV21 chroma order (V before U),
// output packed 32-bit R,G,B,A.
class NV21toRGBA {
public:
    enum { bytes_per_pixel = 4 };   // bytes written per pixel (R,G,B,A)

    // Read one V,U pair from the interleaved chroma plane, removing the 128 bias.
    static void loadvu(int& U, int& V, unsigned char const* &uv) {
        V = int(*uv++) - 128;
        U = int(*uv++) - 128;
    }

    // Write one R,G,B,A pixel from 8.8 fixed-point channel values; advances dst.
    static void store_pixel(unsigned char* &dst, int iR, int iG, int iB, unsigned char alpha) {
        dst[0] = saturate(iR);
        dst[1] = saturate(iG);
        dst[2] = saturate(iB);
        dst[3] = alpha;
        dst += bytes_per_pixel;
    }

private:
    // Clamp an 8.8 fixed-point channel into [0,255].
    static unsigned char saturate(int v) {
        if (v <= 0)     return 0;
        if (v >= 65535) return 0xff;
        return (unsigned char)(v >> 8);
    }
};
295 |
296 | bool nv21_to_rgba(unsigned char* rgba, unsigned char alpha, unsigned char const* nv21, int width, int height) {
297 | return decode_yuv(rgba, nv21, width, height, alpha);
298 | }
299 |
300 | //------------------------------------------------------------------------------
// Pixel-format policy for decode_yuv: NV12 chroma order (U before V),
// output packed 32-bit R,G,B,A.
class NV12toRGBA {
public:
    enum { bytes_per_pixel = 4 };   // bytes written per pixel (R,G,B,A)

    // Read one U,V pair from the interleaved chroma plane, removing the 128 bias.
    static void loadvu(int& U, int& V, unsigned char const* &uv) {
        U = int(*uv++) - 128;
        V = int(*uv++) - 128;
    }

    // Write one R,G,B,A pixel from 8.8 fixed-point channel values; advances dst.
    static void store_pixel(unsigned char* &dst, int iR, int iG, int iB, unsigned char alpha) {
        dst[0] = saturate(iR);
        dst[1] = saturate(iG);
        dst[2] = saturate(iB);
        dst[3] = alpha;
        dst += bytes_per_pixel;
    }

private:
    // Clamp an 8.8 fixed-point channel into [0,255].
    static unsigned char saturate(int v) {
        if (v <= 0)     return 0;
        if (v >= 65535) return 0xff;
        return (unsigned char)(v >> 8);
    }
};
315 |
316 | bool nv12_to_rgba(unsigned char* rgba, unsigned char alpha, unsigned char const* nv21, int width, int height) {
317 | return decode_yuv(rgba, nv21, width, height, alpha);
318 | }
319 |
320 | //------------------------------------------------------------------------------
// Pixel-format policy for decode_yuv: NV21 chroma order (V before U),
// output packed 24-bit B,G,R.
class NV21toBGR {
public:
    enum { bytes_per_pixel = 3 };   // bytes written per pixel

    // Read one V,U pair from the interleaved chroma plane, removing the 128 bias.
    static void loadvu(int& U, int& V, unsigned char const* &uv) {
        V = int(*uv++) - 128;
        U = int(*uv++) - 128;
    }

    // Write one B,G,R pixel from 8.8 fixed-point channel values; advances dst.
    static void store_pixel(unsigned char* &dst, int iR, int iG, int iB, unsigned char/*alpha*/) {
        dst[0] = saturate(iB);
        dst[1] = saturate(iG);
        dst[2] = saturate(iR);
        dst += bytes_per_pixel;
    }

private:
    // Clamp an 8.8 fixed-point channel into [0,255].
    static unsigned char saturate(int v) {
        if (v <= 0)     return 0;
        if (v >= 65535) return 0xff;
        return (unsigned char)(v >> 8);
    }
};
334 | bool nv21_to_bgr(unsigned char* bgr, unsigned char const* nv21, int width, int height) {
335 | return decode_yuv(bgr, nv21, width, height);
336 | }
337 |
338 | //------------------------------------------------------------------------------
// Pixel-format policy for decode_yuv: NV12 chroma order (U before V),
// output packed 24-bit B,G,R.
class NV12toBGR {
public:
    enum { bytes_per_pixel = 3 };   // bytes written per pixel

    // Read one U,V pair from the interleaved chroma plane, removing the 128 bias.
    static void loadvu(int& U, int& V, unsigned char const* &uv) {
        U = int(*uv++) - 128;
        V = int(*uv++) - 128;
    }

    // Write one B,G,R pixel from 8.8 fixed-point channel values; advances dst.
    static void store_pixel(unsigned char* &dst, int iR, int iG, int iB, unsigned char/*alpha*/) {
        dst[0] = saturate(iB);
        dst[1] = saturate(iG);
        dst[2] = saturate(iR);
        dst += bytes_per_pixel;
    }

private:
    // Clamp an 8.8 fixed-point channel into [0,255].
    static unsigned char saturate(int v) {
        if (v <= 0)     return 0;
        if (v >= 65535) return 0xff;
        return (unsigned char)(v >> 8);
    }
};
352 | bool nv12_to_bgr(unsigned char* bgr, unsigned char const* nv21, int width, int height) {
353 | return decode_yuv(bgr, nv21, width, height);
354 | }
355 |
356 | //------------------------------------------------------------------------------
// Pixel-format policy for decode_yuv: NV21 chroma order (V before U),
// output packed 32-bit B,G,R,A.
class NV21toBGRA {
public:
    enum { bytes_per_pixel = 4 };   // bytes written per pixel (B,G,R,A)

    // Read one V,U pair from the interleaved chroma plane, removing the 128 bias.
    static void loadvu(int& U, int& V, unsigned char const* &uv) {
        V = int(*uv++) - 128;
        U = int(*uv++) - 128;
    }

    // Write one B,G,R,A pixel from 8.8 fixed-point channel values; advances dst.
    static void store_pixel(unsigned char* &dst, int iR, int iG, int iB, unsigned char alpha) {
        dst[0] = saturate(iB);
        dst[1] = saturate(iG);
        dst[2] = saturate(iR);
        dst[3] = alpha;
        dst += bytes_per_pixel;
    }

private:
    // Clamp an 8.8 fixed-point channel into [0,255].
    static unsigned char saturate(int v) {
        if (v <= 0)     return 0;
        if (v >= 65535) return 0xff;
        return (unsigned char)(v >> 8);
    }
};
371 |
372 | bool nv21_to_bgra(unsigned char* rgba, unsigned char alpha, unsigned char const* nv21, int width, int height) {
373 | return decode_yuv(rgba, nv21, width, height, alpha);
374 | }
375 |
376 | #endif
377 |
--------------------------------------------------------------------------------